repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 distinct values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 distinct values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
hermantai/sorno-py-scripts | sorno/mathlib.py | 1 | 2471 | """A library for math related things
Copyright 2015 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
class Interval(object):
"""An interval with a starting and a ending points, open or closed.
It's a read-only class.
Attributes:
start (int or float): The starting point of the interval.
end (int or float): The ending point of the interval.
is_start_opened (Optional[bool]): True if the starting point is open.
It's False by default.
is_end_opened (Optional[bool]): True if the ending point is open.
It's False by default.
"""
def __init__(self, start, end, is_start_opened=False, is_end_opened=False):
self._start = start
self._end = end
self._is_start_opened = is_start_opened
self._is_end_opened = is_end_opened
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def is_start_opened(self):
return self._is_start_opened
@property
def is_end_opened(self):
return self._is_end_opened
def __str__(self):
tmp = "Interval(start=%r,end=%r,is_start_opened=%r,is_end_opened=%r)"
return tmp % (
self._start,
self._end,
self._is_start_opened,
self._is_end_opened,
)
def __repr__(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, Interval):
return False
return (
self._start,
self._end,
self._is_start_opened,
self._is_end_opened,
) == (
other._start,
other._end,
other._is_start_opened,
other._is_end_opened,
)
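# Illustrative usage sketch (not part of the original module): Interval instances
# are read-only and compare field by field; the endpoints below are arbitrary.
if __name__ == "__main__":
    closed = Interval(0, 10)
    half_open = Interval(0, 10, is_end_opened=True)
    print(closed)                     # Interval(start=0,end=10,...)
    print(closed == Interval(0, 10))  # True: same endpoints and openness flags
    print(closed == half_open)        # False: the end-openness differs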
| apache-2.0 | -2,402,409,308,476,059,000 | 27.402299 | 79 | 0.617159 | false |
iwm911/plaso | plaso/parsers/utmpx.py | 1 | 6216 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for utmpx files."""
# TODO: Add support for other implementations than Mac OS X.
# The parser should be checked against IOS UTMPX file.
import construct
import logging
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import parser
from plaso.lib import timelib
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class UtmpxMacOsXEvent(event.EventObject):
"""Convenience class for an event utmpx."""
DATA_TYPE = 'mac:utmpx:event'
def __init__(self, timestamp, user, terminal, status, computer_name):
"""Initializes the event object.
Args:
timestamp: when the terminal was started
user: active user name
terminal: name of the terminal
status: terminal status
computer_name: name of the host or IP.
"""
super(UtmpxMacOsXEvent, self).__init__()
self.timestamp = timestamp
self.timestamp_desc = eventdata.EventTimestamp.START_TIME
self.user = user
self.terminal = terminal
self.status = status
self.computer_name = computer_name
class UtmpxParser(parser.BaseParser):
"""Parser for UTMPX files. """
NAME = 'utmpx'
# INFO: Type is supposed to be a short (2 bytes);
# however, if we analyze the file it is always
# a byte followed by 3 bytes with \x00 value.
MAC_UTMPX_ENTRY = construct.Struct(
'utmpx_mac',
construct.String('user', 256),
construct.ULInt32('id'),
construct.String('tty_name', 32),
construct.ULInt32('pid'),
construct.ULInt16('status_type'),
construct.ULInt16('unknown'),
construct.ULInt32('timestamp'),
construct.ULInt32('microsecond'),
construct.String('hostname', 256),
construct.Padding(64))
# 9, 10 and 11 are only for Darwin and IOS.
MAC_STATUS_TYPE = {
0: 'EMPTY',
1: 'RUN_LVL',
2: 'BOOT_TIME',
3: 'OLD_TIME',
4: 'NEW_TIME',
5: 'INIT_PROCESS',
6: 'LOGIN_PROCESS',
7: 'USER_PROCESS',
8: 'DEAD_PROCESS',
9: 'ACCOUNTING',
10: 'SIGNATURE',
11: 'SHUTDOWN_TIME'}
def __init__(self, pre_obj, config):
"""Initializes the parser.
Args:
pre_obj: pre-parsing object.
config: configuration object.
"""
super(UtmpxParser, self).__init__(pre_obj, config)
self._utmpx_record_size = self.MAC_UTMPX_ENTRY.sizeof()
def _ReadEntry(self, file_object):
"""Reads an UTMPX entry.
Args:
file_object: a file-like object that points to an UTMPX file.
Returns:
An event object constructed from the UTMPX entry.
"""
data = file_object.read(self._utmpx_record_size)
if len(data) != self._utmpx_record_size:
return
try:
entry = self.MAC_UTMPX_ENTRY.parse(data)
except (IOError, construct.FieldError) as exception:
logging.warning(
u'Unable to parse Mac OS X UTMPX entry with error: {0:s}'.format(
exception))
return
user, _, _ = entry.user.partition('\x00')
if not user:
user = u'N/A'
terminal, _, _ = entry.tty_name.partition('\x00')
if not terminal:
terminal = u'N/A'
computer_name, _, _ = entry.hostname.partition('\x00')
if not computer_name:
computer_name = u'localhost'
value_status = self.MAC_STATUS_TYPE.get(entry.status_type, u'N/A')
status = u'{0}'.format(value_status)
timestamp = timelib.Timestamp.FromPosixTimeWithMicrosecond(
entry.timestamp, entry.microsecond)
return UtmpxMacOsXEvent(timestamp, user, terminal, status, computer_name)
def _VerifyStructure(self, file_object):
"""Verify that we are dealing with an UTMPX entry.
Args:
file_object: a file-like object that points to an UTMPX file.
Returns:
True if it is a UTMPX entry or False otherwise.
"""
# First entry is a SIGNATURE entry of the file ("header").
try:
header = self.MAC_UTMPX_ENTRY.parse_stream(file_object)
except (IOError, construct.FieldError):
return False
user, _, _ = header.user.partition('\x00')
# The UTMPX_ENTRY structure will often successfully compile on various
# structures, such as binary plist files, and thus we need to do some
# additional validation. The first one is to check if the user name
# can be converted into a unicode string, otherwise we can assume
# we are dealing with non UTMPX data.
try:
_ = unicode(user)
except UnicodeDecodeError:
return False
if user != u'utmpx-1.00':
return False
if self.MAC_STATUS_TYPE[header.status_type] != 'SIGNATURE':
return False
if header.timestamp != 0 or header.microsecond != 0 or header.pid != 0:
return False
tty_name, _, _ = header.tty_name.partition('\x00')
hostname, _, _ = header.hostname.partition('\x00')
if tty_name or hostname:
return False
return True
def Parse(self, file_entry):
"""Extract data from a UTMPX file.
Args:
file_entry: a file entry object.
Returns:
An event object (instance of UtmpxMacOsXEvent) for each logon/logoff
event.
"""
file_object = file_entry.GetFileObject()
if not self._VerifyStructure(file_object):
raise errors.UnableToParseFile(
u'The file is not an UTMPX file.')
event_object = self._ReadEntry(file_object)
while event_object:
event_object.offset = file_object.tell()
yield event_object
event_object = self._ReadEntry(file_object)
file_object.close()
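# Illustrative, self-contained sketch (not part of plaso): decode one raw record
# directly with the construct struct above. The all-zero buffer is a stand-in for
# real utmpx data; plaso normally drives this parser through its own file entries.
if __name__ == '__main__':
  raw_record = b'\x00' * UtmpxParser.MAC_UTMPX_ENTRY.sizeof()
  entry = UtmpxParser.MAC_UTMPX_ENTRY.parse(raw_record)
  print(UtmpxParser.MAC_STATUS_TYPE.get(entry.status_type, u'N/A'))  # -> EMPTY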
| apache-2.0 | 8,046,042,707,812,847,000 | 29.470588 | 80 | 0.660071 | false |
steeve/plugin.video.pulsar | resources/site-packages/bjsonrpc/request.py | 9 | 6728 | """
bjson/request.py
Asynchronous Bidirectional JSON-RPC protocol implementation over TCP/IP
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
try:
from Queue import Queue
except ImportError:
from queue import Queue
import logging
from threading import Event
import traceback
from bjsonrpc.exceptions import ServerError
import bjsonrpc.jsonlib as json
_log = logging.getLogger(__name__)
class Request(object):
"""
Represents a request to the other end which may not be completed yet.
This class is automatically created by *method* Proxy.
Parameters:
**conn**
Connection instance which this Request belongs to.
(internally stored as Request.conn)
**request_data**
Dictionary object to serialize as JSON to send to the other end.
(internally stored as Request.data)
Attributes:
**responses**
Queue of JSON Objects of the response, each as a dictionary. If
no response has been received, this is empty.
**event_response**
A threading.Event object, which is set to true when a response has
been received. Useful to wake up threads or to wait exactly until
the response is received.
**callbacks**
List array where the developer can append functions to call when
the response is received. The function will get the Request object
as a first argument.
**request_id**
Number or ID that identifies the call. For notifications this is None.
Be careful because it may not be an integer. Strings and other objects
may be valid for other implementations.
"""
def __init__(self, conn, request_data):
self.conn = conn
self.data = request_data
self.responses = Queue()
# TODO: Now that we have a Queue, do we need an Event (and a cv)?
self.event_response = Event()
self.callbacks = []
self.thread_wait = self.event_response.wait
self.request_id = None
self.auto_close = False
if 'id' in self.data:
self.request_id = self.data['id']
if self.request_id:
self.auto_close = True
self.conn.addrequest(self)
data = json.dumps(self.data, self.conn)
self.conn.write(data)
def hasresponse(self):
"""
Method that checks whether there's a response or not.
Returns True if it is there or False if it hasn't arrived yet.
"""
if not self.responses.empty(): return True
self.conn.dispatch_until_empty()
return not self.responses.empty()
def setresponse(self, value):
"""
Method used by Connection instance to tell Request that a Response
is available to this request.
Parameters:
**value**
Value (JSON decoded) received from socket.
"""
self.responses.put(value)
for callback in self.callbacks:
try:
callback(self)
except Exception as exc:
_log.error("Error on callback: %r", exc)
_log.debug(traceback.format_exc())
self.event_response.set() # helper for threads.
if self.auto_close:
self.close()
def wait(self):
"""
Block until there is a response. Will manage the socket and dispatch
messages until the response is found.
"""
#if self.response is None:
# self.conn.read_ensure_thread()
while self.responses.empty():
self.conn.read_and_dispatch(condition=lambda: self.responses.empty())
def __call__(self):
return self.value
def __iter__(self):
return self
def __next__(self):
return self.value
def next(self):
return self.__next__()
def close(self):
reqid, self.request_id, self.auto_close = self.request_id, None, False
if reqid:
self.conn.delrequest(reqid)
def __del__(self):
self.close()
@property
def value(self):
"""
Property to get the response value. If the response is not available, it waits
for it (see *wait* method). If the response contains an Error, this
method raises *exceptions.ServerError* with the error text inside.
From version 0.2.0 you can also call the class itself to get the value::
req_stime = rpcconn.method.getServerTime()
print req_stime.value
print req_stime() # equivalent to the prior line.
"""
self.wait()
response = self.responses.get()
err = response.get('error', None)
if err is not None:
raise ServerError(err)
return response['result']
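# Illustrative sketch (not part of the module): a Request is normally obtained
# through a connection's method proxy. The bjsonrpc.connect() helper, the server
# address, and the remote method name "getServerTime" are assumptions here.
if __name__ == "__main__":
    import bjsonrpc
    conn = bjsonrpc.connect(host="127.0.0.1", port=10123)
    request = conn.method.getServerTime()       # returns a Request, not a value
    request.callbacks.append(lambda req: None)  # called once the response arrives
    request.wait()                              # block until the response is read
    print(request.value)                        # raises ServerError on an error reply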
| bsd-3-clause | -6,765,377,454,528,165,000 | 34.597884 | 86 | 0.608502 | false |
david-ragazzi/nupic | nupic/research/TP_shim.py | 6 | 3224 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A shim for the TP class that transparently implements TemporalMemory,
for use with OPF.
"""
import numpy
from nupic.research.temporal_memory import TemporalMemory
class TPShim(TemporalMemory):
"""
TP => Temporal Memory shim class.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
globalDecay=0.10,
activationThreshold=12,
seed=42):
"""
Translate parameters and initialize member variables specific to `TP.py`.
"""
super(TPShim, self).__init__(
columnDimensions=(numberOfCols,),
cellsPerColumn=cellsPerColumn,
activationThreshold=activationThreshold,
initialPermanence=initialPerm,
connectedPermanence=connectedPerm,
minThreshold=minThreshold,
maxNewSynapseCount=newSynapseCount,
permanenceIncrement=permanenceInc,
permanenceDecrement=permanenceDec,
seed=seed)
self.infActiveState = {"t": None}
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
"""
(From `TP.py`)
Handle one compute, possibly learning.
@param bottomUpInput The bottom-up input, typically from a spatial pooler
@param enableLearn If true, perform learning
@param computeInfOutput If None, default behavior is to disable the inference
output when enableLearn is on.
If true, compute the inference output
If false, do not compute the inference output
"""
super(TPShim, self).compute(set(bottomUpInput.nonzero()[0]),
learn=enableLearn)
numberOfCells = self.numberOfCells()
activeState = numpy.zeros(numberOfCells)
activeState[self.getCellIndices(self.activeCells)] = 1
self.infActiveState["t"] = activeState
output = numpy.zeros(numberOfCells)
output[self.getCellIndices(self.predictiveCells | self.activeCells)] = 1
return output
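# Illustrative sketch (not part of nupic): drive the shim with one random SDR.
# The column count and the number of active bits are arbitrary demo values.
if __name__ == "__main__":
  tm = TPShim(numberOfCols=2048, cellsPerColumn=32)
  sdr = numpy.zeros(2048)
  sdr[numpy.random.choice(2048, 40, replace=False)] = 1  # ~2% active columns
  output = tm.compute(sdr, enableLearn=True)
  print(output.nonzero()[0])  # indices of active/predictive cells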
| gpl-3.0 | -4,474,777,136,522,623,500 | 34.822222 | 82 | 0.637097 | false |
jordanemedlock/psychtruths | temboo/Library/SendGrid/NewsletterAPI/Recipients/GetAttachedRecipientLists.py | 5 | 4071 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetAttachedRecipientLists
# Retrieve Recipient Lists attached to a specified newsletter.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetAttachedRecipientLists(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetAttachedRecipientLists Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetAttachedRecipientLists, self).__init__(temboo_session, '/Library/SendGrid/NewsletterAPI/Recipients/GetAttachedRecipientLists')
def new_input_set(self):
return GetAttachedRecipientListsInputSet()
def _make_result_set(self, result, path):
return GetAttachedRecipientListsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetAttachedRecipientListsChoreographyExecution(session, exec_id, path)
class GetAttachedRecipientListsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetAttachedRecipientLists
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('APIKey', value)
def set_APIUser(self, value):
"""
Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('APIUser', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of an existing newsletter, whose recipient lists will be obtained.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('Name', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
"""
super(GetAttachedRecipientListsInputSet, self)._set_input('ResponseFormat', value)
class GetAttachedRecipientListsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetAttachedRecipientLists Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
"""
return self._output.get('Response', None)
class GetAttachedRecipientListsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetAttachedRecipientListsResultSet(response, path)
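# Illustrative sketch (not part of the generated bindings): executing this Choreo
# through a TembooSession; method names follow the Temboo Python SDK. The account,
# app-key values, and the newsletter name are placeholders for real credentials.
if __name__ == "__main__":
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = GetAttachedRecipientLists(session)
    inputs = choreo.new_input_set()
    inputs.set_APIUser('SENDGRID_USERNAME')
    inputs.set_APIKey('SENDGRID_API_KEY')
    inputs.set_Name('my-newsletter')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())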
| apache-2.0 | 7,040,928,816,485,411,000 | 40.540816 | 179 | 0.694915 | false |
kontais/EFI-MIPS | ToolKit/cmds/python/Lib/regex_syntax.py | 17 | 1893 | """Constants for selecting regexp syntaxes for the obsolete regex module.
This module is only for backward compatibility. "regex" has now
been replaced by the new regular expression module, "re".
These bits are passed to regex.set_syntax() to choose among
alternative regexp syntaxes.
"""
# 1 means plain parentheses serve as grouping, and backslash
# parentheses are needed for literal searching.
# 0 means backslash-parentheses are grouping, and plain parentheses
# are for literal searching.
RE_NO_BK_PARENS = 1
# 1 means plain | serves as the "or"-operator, and \| is a literal.
# 0 means \| serves as the "or"-operator, and | is a literal.
RE_NO_BK_VBAR = 2
# 0 means plain + or ? serves as an operator, and \+, \? are literals.
# 1 means \+, \? are operators and plain +, ? are literals.
RE_BK_PLUS_QM = 4
# 1 means | binds tighter than ^ or $.
# 0 means the contrary.
RE_TIGHT_VBAR = 8
# 1 means treat \n as an _OR operator
# 0 means treat it as a normal character
RE_NEWLINE_OR = 16
# 0 means that a special characters (such as *, ^, and $) always have
# their special meaning regardless of the surrounding context.
# 1 means that special characters may act as normal characters in some
# contexts. Specifically, this applies to:
# ^ - only special at the beginning, or after ( or |
# $ - only special at the end, or before ) or |
# *, +, ? - only special when not after the beginning, (, or |
RE_CONTEXT_INDEP_OPS = 32
# ANSI sequences (\n etc) and \xhh
RE_ANSI_HEX = 64
# No GNU extensions
RE_NO_GNU_EXTENSIONS = 128
# Now define combinations of bits for the standard possibilities.
RE_SYNTAX_AWK = (RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS)
RE_SYNTAX_EGREP = (RE_SYNTAX_AWK | RE_NEWLINE_OR)
RE_SYNTAX_GREP = (RE_BK_PLUS_QM | RE_NEWLINE_OR)
RE_SYNTAX_EMACS = 0
# (Python's obsolete "regexp" module used a syntax similar to awk.)
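# Illustrative sketch (not part of the module): the old regex module consumed
# these bits via regex.set_syntax(); regex itself is long gone from modern
# Pythons, so the call is only shown in a comment.
if __name__ == "__main__":
    # import regex
    # regex.set_syntax(RE_SYNTAX_AWK)
    print(RE_SYNTAX_AWK)  # RE_NO_BK_PARENS | RE_NO_BK_VBAR | RE_CONTEXT_INDEP_OPS == 35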
| bsd-3-clause | -174,986,317,348,054,080 | 34.716981 | 73 | 0.703645 | false |
andrewthetechie/slack_rtmbot | slack_rtmbot.py | 1 | 12598 | #!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import yaml
import os
import sys
import time
import logging
import re
from threading import Thread
from logging.handlers import RotatingFileHandler
from slackclient import SlackClient
def dbg(debug_string):
"""
Used to write debugging information if debug is set in config
:param debug_string:
:return:
"""
if debug:
main_log.info(debug_string)
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
self.dm_help = []
self.channel_help = []
def connect(self):
"""Convenience method that creates Server instance"""
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
self.on_start()
self.load_help()
while True:
for reply in self.slack_client.rtm_read():
self.input_logging(reply)
self.input(reply)
self.output()
self.autoping()
time.sleep(config['PING_INTERVAL']
if "PING_INTERVAL" in config else .1)
def autoping(self):
"""
This method keeps the bot connection alive to slack. Requires a ping every 5 seconds if there
is no activity.
:return:
"""
# hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def load_help(self):
"""
Calls the process_help() function in each plugin to set up the help text variables
:return:
"""
global channel_help
global dm_help
for plugin in self.bot_plugins:
plug_help = None
try:
plug_help = plugin.get_help()
if len(plug_help[0]) > 0:
for help in plug_help[0]:
self.dm_help.append(help)
if len(plug_help[1]) > 0:
for help in plug_help[1]:
self.channel_help.append(help)
except AttributeError:
main_log.info(
"{} is a bad bad plugin and doesnt implement process_help".format(plugin))
self.dm_help.append(
"help - Will return a listing of commands the bot responds to")
self.channel_help.append(
"help - Will return a listing of commands the bot responds to")
return
def output_help(self, channel):
"""
Outputs help information to the help channel passed in
:param channel:
:return:
"""
message = "Help for {}\n-------------------\n".format(config[
'BOT_NAME'])
if len(self.dm_help) > 0:
message = "{}DM Commands:\n-------------------\n".format(message)
for help in self.dm_help:
message = "{}\n{}".format(message, help)
if len(self.channel_help) > 0:
message = "{}\n\nChannel Commands:\n-------------------\n".format(
message)
for help in self.channel_help:
message = "{}\n{}".format(message, help)
self.slack_client.api_call(
"chat.postMessage", channel=channel, text=message, as_user=True)
return
def on_start(self):
"""
Runs the process_onstart method for each plugin that implements it
:return:
"""
function_name = "process_onstart"
for plugin in self.bot_plugins:
plugin.do(function_name, None)
def input(self, data):
"""
Receives messages from the RTM API (data) and passes them to plugin methods based on data type
For example, a message gets sent to process_message
Also handles input for the help commands and routes them to output_help
:param data:
:return:
"""
if "type" in data:
function_name = "process_" + data["type"]
dbg("got {}".format(function_name))
match = None
if function_name == "process_message":
match = re.findall(r"{} (help|halp|help me)".format(
config['BOT_NAME']), data['text'])
if data['channel'].startswith("D"):
function_name = "process_directmessage"
match = re.findall(r"(help|halp|help me)", data['text'])
if len(match) > 0 and data['user'] != config['BOT_USER_ID']:
return self.output_help(data['channel'])
for plugin in self.bot_plugins:
plugin.do(function_name, data)
def output(self):
"""
Uses the slack web API (not the RTM API) to post a message based on content of
outputs from plugins.
Uses the web api because the RTM api is not able to process formatted messages
:return:
"""
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
if channel is not None and output[1] != None:
if limiter == True:
time.sleep(.1)
limiter = False
message = output[1].encode('ascii', 'ignore')
# channel.send_message("{}".format(message))
self.slack_client.api_call(
"chat.postMessage", channel=output[0], text=message, as_user=True)
limiter = True
def load_plugins(self):
"""
Loads all plugins in the /plugins directory
:return:
"""
for plugin in glob.glob(directory + '/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory + '/plugins/')
for plugin in glob.glob(directory + '/plugins/*.py') + \
glob.glob(directory + '/plugins/*/*.py'):
main_log.info(plugin)
name = plugin.split('/')[-1][:-3]
self.bot_plugins.append(Plugin(name))
def input_logging(self, data):
"""
If INPUT_LOGGING is true in config, logs all input sent to the bot.
This is used more for analytics than debugging. If you want
debugging, turn on debugging.
:param data:
:return:
"""
# do nothing if we haven't defined input logging or it is false
if not "INPUT_LOGGING" in config or not config['INPUT_LOGGING']:
return
# don't log anything that is coming from the bot itself
if "user" in data and data['user'] == config['BOT_USER_ID']:
return
# discard some logs that we just don't need
if data['type'] in config['INPUT_DO_NOT_LOG_TYPES']:
return
input_log.info("{},{},{},{}".format(
data['type'],
data['user'] if "user" in data else None,
data['channel'] if "channel" in data else None,
data['text'] if "text" in data else None))
class Plugin(object):
def __init__(self, name, plugin_config={}):
self.name = name
self.module = __import__(name)
self.outputs = []
if name in config:
main_log.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup()
def plugin_worker(self, function_name, data):
"""
Method used to thread plugins
:param function_name:
:param data:
:return:
"""
try:
if function_name == "process_onstart":
eval("self.module." + function_name)()
elif data['user'] != config['BOT_USER_ID']:
eval("self.module." + function_name)(data)
except KeyError:
return
def get_help(self):
"""
Runs the "process_help" function from a plugin and returns the output
:return:
"""
function_name = "process_help"
return eval("self.module." + function_name)()
def do(self, function_name, data):
"""
Runs a plugin if it has a function to match the data being passed to it
:param function_name:
:param data:
:return:
"""
if function_name in dir(self.module):
try:
# starts a thread for this call to a plugin
t = Thread(
target=self.plugin_worker, args=(
function_name, data))
t.start()
except:
dbg("problem in module {} {}".format(function_name, data))
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
dbg("problem in catch all")
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
main_log.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
def do_dm_help(self):
dm_help = []
while True:
if 'dm_help' in dir(self.module):
if self.module.dm_help and len(self.module.dm_help) > 0:
main_log.info("dm_help from {}".format(self.module))
dm_help.append(self.module.dm_help.pop(0))
else:
break
else:
self.module.dm_help = []
return dm_help
def do_channel_help(self):
channel_help = []
while True:
if 'channel_help' in dir(self.module):
if self.module.channel_help and len(self.module.channel_help) > 0:
main_log.info("channel_help from {}".format(self.module))
channel_help.append(self.module.channel_help.pop(0))
else:
break
else:
self.module.channel_help = []
return channel_help
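# Illustrative sketch (not part of this file): the smallest plugin module the
# loader above can drive. It would live at plugins/echo.py; the module-level
# "outputs" list and the process_*/process_help hooks are the names Plugin.do()
# and get_help() look for. The command text itself is made up.
#
#   outputs = []
#
#   def process_message(data):
#       outputs.append([data['channel'], "echo: {}".format(data['text'])])
#
#   def process_help():
#       return (["echo <text> - repeat <text> in a DM"],
#               ["echo <text> - repeat <text> in the channel"])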
class UnknownChannel(Exception):
pass
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s : %(message)s')
fileHandler = RotatingFileHandler(log_file, mode='a', maxBytes=(
config['LOGGING_MAX_SIZE'] if "LOGGING_MAX_SIZE" in config else 10485760),
backupCount=config[
'LOGGING_LOGS_TO_KEEP'] if "LOGGING_LOGS_TO_KEEP" in config else 5
)
fileHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
def main_loop():
"""
Starts up the main bot loop and listens for a keyboard interrupt to quit it
:return:
"""
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
main_log.exception('OOPS')
if __name__ == "__main__":
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = yaml.load(file('conf/rtmbot.conf', 'r'))
debug = config["DEBUG"] if "DEBUG" in config else False
input_logging = config[
'INPUT_LOGGING'] if "INPUT_LOGGING" in config else False
bot = RtmBot(config["SLACK_TOKEN"])
site_plugins = []
main_log_file = config[
'LOGPATH'] + config['LOGFILE'] if "LOGPATH" in config and "LOGFILE" in config else "bot.log"
setup_logger("main_logs", main_log_file, logging.INFO)
main_log = logging.getLogger('main_logs')
if input_logging:
input_log_file = config['LOGPATH'] + config[
'INPUT_LOGFILE'] if "LOGPATH" in config and "INPUT_LOGFILE" in config else "inputs.log"
setup_logger("input_logs", input_log_file, logging.INFO)
input_log = logging.getLogger('input_logs')
if "DAEMON" in config and config['DAEMON']:
import daemon
with daemon.DaemonContext():
main_loop()
main_loop()
| gpl-3.0 | 4,989,589,825,745,127,000 | 32.956873 | 108 | 0.531354 | false |
cailloumajor/home-web | backend/core/auth/backends.py | 1 | 1129 | # -*- coding: utf-8 -*-
# pylint: skip-file
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import check_password
class SettingsBackend:
"""
Authenticates against the settings ADMIN_LOGIN and ADMIN_PASSWORD.
Use the login name and a hash of the password.
"""
def authenticate(self, username=None, password=None):
UserModel = get_user_model()
login_valid = (username == settings.ADMIN_LOGIN)
pwd_valid = check_password(password, settings.ADMIN_PASSWORD)
if login_valid and pwd_valid:
try:
user = UserModel.objects.get(username=username)
except UserModel.DoesNotExist:
user = UserModel(username=username)
user.is_staff = True
user.is_superuser = True
user.save()
return user
return None
def get_user(self, user_id):
UserModel = get_user_model()
try:
return UserModel.objects.get(pk=user_id)
except UserModel.DoesNotExist:
return None
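# Illustrative sketch (not part of the module): the settings this backend expects.
# The values and the dotted path are placeholders; ADMIN_PASSWORD must be a hash
# produced by make_password(), since authenticate() runs check_password() on it.
#
#   # settings.py
#   from django.contrib.auth.hashers import make_password
#   ADMIN_LOGIN = 'admin'
#   ADMIN_PASSWORD = make_password('change-me')
#   AUTHENTICATION_BACKENDS = ['core.auth.backends.SettingsBackend']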
| gpl-3.0 | 5,804,025,456,009,240,000 | 31.257143 | 69 | 0.618246 | false |
zaragoza-sedeelectronica/hackathon-co.sa | bower_components/lumx/changelog.py | 48 | 3091 | #! /usr/bin/env python
from subprocess import Popen, PIPE
import re
def getTags():
Popen('git fetch --tags'.split(), stdout=PIPE).communicate()
(stdout, _) = Popen('git tag'.split(), stdout=PIPE).communicate()
return sorted(stdout.split(), key=lambda s: [int(x) for x in s.replace('v', '').split('.')])
def checkLastChangelogTag():
last = None
with open('CHANGELOG.md', 'r+') as f:
lines = f.readlines()
for line in lines:
m = re.search(r'^##\s+(\S+):', line)
if m:
last = m.group(1)
break
return last
def buildNewLogs(fromTag, toTag):
stdout = ''
if fromTag:
(stdout, _) = Popen(('git rev-list %s..%s' % (fromTag, toTag)).split(), stdout=PIPE).communicate()
else:
(stdout, _) = Popen(('git rev-list %s' % toTag).split(), stdout=PIPE).communicate()
commits = stdout.splitlines()
feats = []
fixs = []
brokens = []
for commit in commits:
(title, _) = Popen(('git show -s --format=%%s %s' % commit).split(), stdout=PIPE).communicate()
(body, _) = Popen(('git show -s --format=%%b %s' % commit).split(), stdout=PIPE).communicate()
if not title:
continue
data = title.split(' ', 1)
if data[0] == 'feat':
feats.append(data[1].rstrip())
elif data[0] == 'fix':
fixs.append(data[1].rstrip())
if 'BROKEN:' in body:
brokens += body.split('BROKEN:')[1].splitlines()
logs = "## %s:\n" % toTag
if not len(feats) and not len(fixs) and not len(brokens):
logs += "*No major changes.*\n\n\n"
else:
if len(feats):
logs += "\n#### New features:\n"
for feat in feats:
logs += " - %s\n" % feat
if len(fixs):
logs += "\n#### Bug fixes:\n"
for fix in fixs:
logs += " - %s\n" % fix
if len(brokens):
logs += "\n#### Breaking changes:\n"
for broken in brokens:
if broken.rstrip() != '':
logs += " - %s\n" % broken
logs += "\n\n"
return logs
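# Illustrative commit messages (not from the repository) in the format the loop
# above understands: the first word of the subject routes the entry to the
# features or fixes section, and body lines after "BROKEN:" become breaking changes.
#
#   feat dropdown: add keyboard navigation
#
#   fix tooltip: keep the arrow inside the viewport
#
#   BROKEN:
#   dropdowns now require the lx-dropdown-toggle attribute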
if __name__ == "__main__":
tags = getTags()
lastChangelogTag = checkLastChangelogTag()
changelog = ''
tagsToBuild = tags
previousTag = None
if lastChangelogTag:
previousTag = lastChangelogTag
tagsToBuild = tags[tags.index(lastChangelogTag) + 1:]
else:
tagsToBuild = tags[1:] # ignoring first release which contains only the first commit
with open('CHANGELOG.md', 'r+') as f:
changelog = f.read().replace('# Changelog\n\n', '').rstrip() + '\n'
if not len(tagsToBuild):
print "No new changlogs! Last tag (%s) is already in the CHANGELOG.md." % lastChangelogTag
exit(0)
for tag in tagsToBuild:
newLogs = buildNewLogs(previousTag, tag)
previousTag = tag
changelog = newLogs + changelog
changelog = '# Changelog\n\n' + changelog
with open('CHANGELOG.md', 'w') as f:
f.write(changelog)
| apache-2.0 | -7,786,225,083,972,410,000 | 28.160377 | 106 | 0.531867 | false |
aristanetworks/arista-ovs-quantum | quantum/plugins/nicira/nicira_nvp_plugin/NvpApiClient.py | 4 | 7453 | # Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# @author: Somik Behera, Nicira Networks, Inc.
import httplib # basic HTTP library for HTTPS connections
import logging
from quantum.plugins.nicira.nicira_nvp_plugin.api_client import (
client_eventlet, request_eventlet)
LOG = logging.getLogger("NVPApiHelper")
LOG.setLevel(logging.INFO)
class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
'''
Helper class to do basic login, cookie management, and provide base
method to send HTTP requests.
Implements new eventlet-based framework derived from the management
console nvp_gevent_client module.
'''
def __init__(self, api_providers, user, password, request_timeout,
http_timeout, retries, redirects, failover_time,
concurrent_connections=3):
'''Constructor.
:param api_providers: a list of tuples in the form:
(host, port, is_ssl=True). Passed on to NvpClientEventlet.
:param user: the login username.
:param password: the login password.
:param concurrent_connections: the number of concurrent connections.
:param request_timeout: all operations (including retries, redirects
from unresponsive controllers, etc) should finish within this
timeout.
:param http_timeout: how long to wait before aborting an
unresponsive controller (and allow for retries to another
controller in the cluster)
:param retries: the number of retries for each request.
:param redirects: the number of redirects to follow.
:param failover_time: minimum time between controller failover and new
connections allowed.
'''
client_eventlet.NvpApiClientEventlet.__init__(
self, api_providers, user, password, concurrent_connections,
failover_time=failover_time)
self._request_timeout = request_timeout
self._http_timeout = http_timeout
self._retries = retries
self._redirects = redirects
def login(self, user=None, password=None):
'''Login to NVP controller.
Assumes same password is used for all controllers.
:param user: NVP controller user (usually admin). Provided for
backwards compatibility. In the normal mode of operation
this should be None.
:param password: NVP controller password. Provided for backwards
compatibility. In the normal mode of operation this should
be None.
:returns: Does not return a value.
'''
if user:
self._user = user
if password:
self._password = password
return client_eventlet.NvpApiClientEventlet.login(self)
def request(self, method, url, body="", content_type="application/json"):
'''Issues request to controller.'''
g = request_eventlet.NvpGenericRequestEventlet(
self, method, url, body, content_type, auto_login=True,
request_timeout=self._request_timeout,
http_timeout=self._http_timeout,
retries=self._retries, redirects=self._redirects)
g.start()
response = g.join()
LOG.debug('NVPApiHelper.request() returns "%s"' % response)
# response is a modified HTTPResponse object or None.
# response.read() will not work on response as the underlying library
# request_eventlet.NvpApiRequestEventlet has already called this
# method in order to extract the body and headers for processing.
# NvpApiRequestEventlet derived classes call .read() and
# .getheaders() on the HTTPResponse objects and store the results in
# the response object's .body and .headers data members for future
# access.
if response is None:
# Timeout.
LOG.error('Request timed out: %s to %s' % (method, url))
raise RequestTimeout()
status = response.status
if status == httplib.UNAUTHORIZED:
raise UnAuthorizedRequest()
# Fail-fast: Check for exception conditions and raise the
# appropriate exceptions for known error codes.
if status in self.error_codes:
LOG.error("Received error code: %s" % status)
LOG.error("Server Error Message: %s" % response.body)
self.error_codes[status](self)
# Continue processing for non-error condition.
if (status != httplib.OK and status != httplib.CREATED
and status != httplib.NO_CONTENT):
LOG.error("%s to %s, unexpected response code: %d (content = '%s')"
% (method, url, response.status, response.body))
return None
return response.body
def fourZeroFour(self):
raise ResourceNotFound()
def fourZeroNine(self):
raise Conflict()
def fiveZeroThree(self):
raise ServiceUnavailable()
def fourZeroThree(self):
raise Forbidden()
def zero(self):
raise NvpApiException()
# TODO(del): ensure error_codes are handled/raised appropriately
# in api_client.
error_codes = {404: fourZeroFour,
409: fourZeroNine,
503: fiveZeroThree,
403: fourZeroThree,
301: zero,
307: zero,
400: zero,
500: zero}
class NvpApiException(Exception):
'''
Base NvpApiClient Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
'''
message = "An unknown exception occurred."
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class UnAuthorizedRequest(NvpApiException):
message = "Server denied session's authentication credentials."
class ResourceNotFound(NvpApiException):
message = "An entity referenced in the request was not found."
class Conflict(NvpApiException):
message = "Request conflicts with configuration on a different entity."
class ServiceUnavailable(NvpApiException):
message = ("Request could not completed because the associated "
"resource could not be reached.")
class Forbidden(NvpApiException):
message = ("The request is forbidden from accessing the "
"referenced resource.")
class RequestTimeout(NvpApiException):
message = "The request has timed out."
| apache-2.0 | -5,229,935,601,841,114,000 | 34.660287 | 79 | 0.645914 | false |
gribozavr/swift | utils/swift_build_support/swift_build_support/host_specific_configuration.py | 1 | 12379 | # swift_build_support/host_configuration_support.py -------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from argparse import ArgumentError
import diagnostics
from .targets import StdlibDeploymentTarget
class HostSpecificConfiguration(object):
"""Configuration information for an individual host."""
def __init__(self, host_target, args):
"""Initialize for the given `host_target`."""
# Compute the set of deployment targets to configure/build.
if host_target == args.host_target:
# This host is the user's desired product, so honor the requested
# set of targets to configure/build.
stdlib_targets_to_configure = args.stdlib_deployment_targets
if "all" in args.build_stdlib_deployment_targets:
stdlib_targets_to_build = set(stdlib_targets_to_configure)
else:
stdlib_targets_to_build = set(
args.build_stdlib_deployment_targets).intersection(
set(args.stdlib_deployment_targets))
else:
# Otherwise, this is a host we are building as part of
# cross-compiling, so we only need the target itself.
stdlib_targets_to_configure = [host_target]
stdlib_targets_to_build = set(stdlib_targets_to_configure)
# Compute derived information from the arguments.
#
# FIXME: We should move the platform-derived arguments to be entirely
# data driven, so that we can eliminate this code duplication and just
# iterate over all supported platforms.
platforms_to_skip_build = self.__platforms_to_skip_build(args)
platforms_to_skip_test = self.__platforms_to_skip_test(args)
platforms_archs_to_skip_test = \
self.__platforms_archs_to_skip_test(args)
platforms_to_skip_test_host = self.__platforms_to_skip_test_host(args)
# Compute the lists of **CMake** targets for each use case (configure
# vs. build vs. run) and the SDKs to configure with.
self.sdks_to_configure = set()
self.swift_stdlib_build_targets = []
self.swift_test_run_targets = []
self.swift_benchmark_build_targets = []
self.swift_benchmark_run_targets = []
for deployment_target_name in stdlib_targets_to_configure:
# Get the target object.
deployment_target = StdlibDeploymentTarget.get_target_for_name(
deployment_target_name)
if deployment_target is None:
diagnostics.fatal("unknown target: %r" % (
deployment_target_name,))
# Add the SDK to use.
deployment_platform = deployment_target.platform
self.sdks_to_configure.add(deployment_platform.sdk_name)
# If we aren't actually building this target (only configuring
# it), do nothing else.
if deployment_target_name not in stdlib_targets_to_build:
continue
# Compute which actions are desired.
build = (
deployment_platform not in platforms_to_skip_build)
test = (
deployment_platform not in platforms_to_skip_test)
test_host_only = None
dt_supports_benchmark = deployment_target.supports_benchmark
build_benchmarks = build and dt_supports_benchmark
build_external_benchmarks = all([build, dt_supports_benchmark,
args.build_external_benchmarks])
# FIXME: Note, `build-script-impl` computed a property here
# w.r.t. testing, but it was actually unused.
# For platforms which normally require a connected device to
# test, the default behavior is to run tests that only require
# the host (i.e., they do not attempt to execute).
if deployment_platform.uses_host_tests and \
deployment_platform not in \
platforms_to_skip_test_host:
test_host_only = True
name = deployment_target.name
for skip_test_arch in platforms_archs_to_skip_test:
if deployment_target.name == skip_test_arch.name:
test = False
if build:
# Validation, long, and stress tests require building the full
# standard library, whereas the other targets can build a
# slightly smaller subset which is faster to build.
if args.build_swift_stdlib_unittest_extra or \
args.validation_test or args.long_test or \
args.stress_test:
self.swift_stdlib_build_targets.append(
"swift-stdlib-" + name)
else:
self.swift_stdlib_build_targets.append(
"swift-test-stdlib-" + name)
if build_benchmarks:
self.swift_benchmark_build_targets.append(
"swift-benchmark-" + name)
if args.benchmark:
self.swift_benchmark_run_targets.append(
"check-swift-benchmark-" + name)
if build_external_benchmarks:
# Add support for the external benchmarks.
self.swift_benchmark_build_targets.append(
"swift-benchmark-{}-external".format(name))
if args.benchmark:
self.swift_benchmark_run_targets.append(
"check-swift-benchmark-{}-external".format(name))
if test:
if test_host_only:
suffix = "-only_non_executable"
elif args.only_executable_test:
suffix = "-only_executable"
else:
suffix = ""
subset_suffix = ""
if args.validation_test and args.long_test and \
args.stress_test:
subset_suffix = "-all"
elif args.validation_test:
subset_suffix = "-validation"
elif args.long_test:
subset_suffix = "-only_long"
elif args.stress_test:
subset_suffix = "-only_stress"
else:
subset_suffix = ""
# Support for running the macCatalyst tests with
# the iOS-like target triple.
if name == "macosx-x86_64" and args.maccatalyst \
and args.maccatalyst_ios_tests:
(self.swift_test_run_targets
.append("check-swift{}{}-{}".format(
subset_suffix, suffix, "macosx-maccatalyst-x86_64")))
else:
(self.swift_test_run_targets
.append("check-swift{}{}-{}".format(
subset_suffix, suffix, name)))
if args.test_optimized and not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize-{}".format(
subset_suffix, name))
if args.test_optimize_for_size and not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize_size-{}".format(
subset_suffix, name))
if args.test_optimize_none_with_implicit_dynamic and \
not test_host_only:
self.swift_test_run_targets.append(
"check-swift{}-optimize_none_with_implicit_dynamic-{}"
.format(subset_suffix, name))
def __platforms_to_skip_build(self, args):
platforms_to_skip_build = set()
if not args.build_linux:
platforms_to_skip_build.add(StdlibDeploymentTarget.Linux)
if not args.build_freebsd:
platforms_to_skip_build.add(StdlibDeploymentTarget.FreeBSD)
if not args.build_cygwin:
platforms_to_skip_build.add(StdlibDeploymentTarget.Cygwin)
if not args.build_osx:
platforms_to_skip_build.add(StdlibDeploymentTarget.OSX)
if not args.build_ios_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.iOS)
if not args.build_ios_simulator:
platforms_to_skip_build.add(StdlibDeploymentTarget.iOSSimulator)
if not args.build_tvos_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.AppleTV)
if not args.build_tvos_simulator:
platforms_to_skip_build.add(
StdlibDeploymentTarget.AppleTVSimulator)
if not args.build_watchos_device:
platforms_to_skip_build.add(StdlibDeploymentTarget.AppleWatch)
if not args.build_watchos_simulator:
platforms_to_skip_build.add(
StdlibDeploymentTarget.AppleWatchSimulator)
if not args.build_android:
platforms_to_skip_build.add(StdlibDeploymentTarget.Android)
return platforms_to_skip_build
def __platforms_to_skip_test(self, args):
platforms_to_skip_test = set()
if not args.test_linux:
platforms_to_skip_test.add(StdlibDeploymentTarget.Linux)
if not args.test_freebsd:
platforms_to_skip_test.add(StdlibDeploymentTarget.FreeBSD)
if not args.test_cygwin:
platforms_to_skip_test.add(StdlibDeploymentTarget.Cygwin)
if not args.test_osx:
platforms_to_skip_test.add(StdlibDeploymentTarget.OSX)
if not args.test_ios_host:
platforms_to_skip_test.add(StdlibDeploymentTarget.iOS)
else:
raise ArgumentError(None,
"error: iOS device tests are not " +
"supported in open-source Swift.")
if not args.test_ios_simulator:
platforms_to_skip_test.add(StdlibDeploymentTarget.iOSSimulator)
if not args.test_tvos_host:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleTV)
else:
raise ArgumentError(None,
"error: tvOS device tests are not " +
"supported in open-source Swift.")
if not args.test_tvos_simulator:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleTVSimulator)
if not args.test_watchos_host:
platforms_to_skip_test.add(StdlibDeploymentTarget.AppleWatch)
else:
raise ArgumentError(None,
"error: watchOS device tests are not " +
"supported in open-source Swift.")
if not args.test_watchos_simulator:
platforms_to_skip_test.add(
StdlibDeploymentTarget.AppleWatchSimulator)
if not args.test_android:
platforms_to_skip_test.add(StdlibDeploymentTarget.Android)
return platforms_to_skip_test
def __platforms_archs_to_skip_test(self, args):
platforms_archs_to_skip_test = set()
if not args.test_ios_32bit_simulator:
platforms_archs_to_skip_test.add(
StdlibDeploymentTarget.iOSSimulator.i386)
return platforms_archs_to_skip_test
def __platforms_to_skip_test_host(self, args):
platforms_to_skip_test_host = set()
if not args.test_android_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.Android)
if not args.test_ios_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.iOS)
if not args.test_tvos_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.AppleTV)
if not args.test_watchos_host:
platforms_to_skip_test_host.add(StdlibDeploymentTarget.AppleWatch)
return platforms_to_skip_test_host
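# Illustrative sketch (not part of the module): build-script constructs one of
# these per host; "args" below stands for the fully-populated argparse namespace
# produced by the driver, and "macosx-x86_64" is just an example host name.
#
#   host_config = HostSpecificConfiguration("macosx-x86_64", args)
#   print(host_config.sdks_to_configure)
#   print(host_config.swift_stdlib_build_targets)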
| apache-2.0 | -7,912,246,868,906,289,000 | 46.068441 | 79 | 0.578803 | false |
openhatch/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_tables.py | 16 | 37488 | #! /usr/bin/env python
# $Id: test_tables.py 7313 2012-01-11 20:28:57Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
import os
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
mydir = 'test_parsers/test_rst/'
include2 = os.path.join(mydir, 'test_directives/include2.txt')
totest = {}
totest['grid_tables'] = [
["""\
+-------------------------------------+
| A table with one cell and one line. |
+-------------------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="37">
<tbody>
<row>
<entry>
<paragraph>
A table with one cell and one line.
"""],
["""\
+-----------------------+
| A table with one cell |
| and two lines. |
+-----------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="23">
<tbody>
<row>
<entry>
<paragraph>
A table with one cell
and two lines.
"""],
["""\
+-----------------------+
| A malformed table. |
+-----------------------+
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
<literal_block xml:space="preserve">
+-----------------------+
| A malformed table. |
+-----------------------+
"""],
["""\
+------------------------+
| A well-formed | table. |
+------------------------+
+------------------------+
| This +----------+ too! |
+------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="24">
<tbody>
<row>
<entry>
<paragraph>
A well-formed | table.
<table>
<tgroup cols="1">
<colspec colwidth="24">
<tbody>
<row>
<entry>
<paragraph>
This +----------+ too!
"""],
["""\
+--------------+--------------+
| A table with | two columns. |
+--------------+--------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns.
"""],
["""\
+--------------+
| A table with |
+--------------+
| two rows. |
+--------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<row>
<entry>
<paragraph>
two rows.
"""],
["""\
+--------------+-------------+
| A table with | two columns |
+--------------+-------------+
| and | two rows. |
+--------------+-------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="13">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns
<row>
<entry>
<paragraph>
and
<entry>
<paragraph>
two rows.
"""],
["""\
+--------------+---------------+
| A table with | two columns, |
+--------------+---------------+
| two rows, and a column span. |
+------------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="15">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns,
<row>
<entry morecols="1">
<paragraph>
two rows, and a column span.
"""],
["""\
+--------------------------+
| A table with three rows, |
+------------+-------------+
| and two | columns. |
+------------+-------------+
| First and last rows |
| contains column spans. |
+--------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="13">
<tbody>
<row>
<entry morecols="1">
<paragraph>
A table with three rows,
<row>
<entry>
<paragraph>
and two
<entry>
<paragraph>
columns.
<row>
<entry morecols="1">
<paragraph>
First and last rows
contains column spans.
"""],
["""\
+--------------+--------------+
| A table with | two columns, |
+--------------+ and a row |
| two rows, | span. |
+--------------+--------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry morerows="1">
<paragraph>
two columns,
and a row
span.
<row>
<entry>
<paragraph>
two rows,
"""],
["""\
+------------+-------------+---------------+
| A table | two rows in | and row spans |
| with three +-------------+ to left and |
| columns, | the middle, | right. |
+------------+-------------+---------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="3">
<colspec colwidth="12">
<colspec colwidth="13">
<colspec colwidth="15">
<tbody>
<row>
<entry morerows="1">
<paragraph>
A table
with three
columns,
<entry>
<paragraph>
two rows in
<entry morerows="1">
<paragraph>
and row spans
to left and
right.
<row>
<entry>
<paragraph>
the middle,
"""],
["""\
Complex spanning pattern (no edge knows all rows/cols):
+-----------+-------------------------+
| W/NW cell | N/NE cell |
| +-------------+-----------+
| | Middle cell | E/SE cell |
+-----------+-------------+ |
| S/SE cell | |
+-------------------------+-----------+
""",
"""\
<document source="test data">
<paragraph>
Complex spanning pattern (no edge knows all rows/cols):
<table>
<tgroup cols="3">
<colspec colwidth="11">
<colspec colwidth="13">
<colspec colwidth="11">
<tbody>
<row>
<entry morerows="1">
<paragraph>
W/NW cell
<entry morecols="1">
<paragraph>
N/NE cell
<row>
<entry>
<paragraph>
Middle cell
<entry morerows="1">
<paragraph>
E/SE cell
<row>
<entry morecols="1">
<paragraph>
S/SE cell
"""],
["""\
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="4">
<colspec colwidth="24">
<colspec colwidth="12">
<colspec colwidth="10">
<colspec colwidth="10">
<thead>
<row>
<entry>
<paragraph>
Header row, column 1
<entry>
<paragraph>
Header 2
<entry>
<paragraph>
Header 3
<entry>
<paragraph>
Header 4
<tbody>
<row>
<entry>
<paragraph>
body row 1, column 1
<entry>
<paragraph>
column 2
<entry>
<paragraph>
column 3
<entry>
<paragraph>
column 4
<row>
<entry>
<paragraph>
body row 2
<entry morecols="2">
<paragraph>
Cells may span columns.
<row>
<entry>
<paragraph>
body row 3
<entry morerows="1">
<paragraph>
Cells may
span rows.
<entry morecols="1" morerows="1">
<bullet_list bullet="-">
<list_item>
<paragraph>
Table cells
<list_item>
<paragraph>
contain
<list_item>
<paragraph>
body elements.
<row>
<entry>
<paragraph>
body row 4
"""],
["""\
+-----------------+--------+
| A simple table | cell 2 |
+-----------------+--------+
| cell 3 | cell 4 |
+-----------------+--------+
No blank line after table.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="17">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<paragraph>
cell 2
<row>
<entry>
<paragraph>
cell 3
<entry>
<paragraph>
cell 4
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<paragraph>
No blank line after table.
"""],
["""\
+-----------------+--------+
| A simple table | cell 2 |
+-----------------+--------+
| cell 3 | cell 4 |
+-----------------+--------+
Unexpected indent and no blank line after table.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="17">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<paragraph>
cell 2
<row>
<entry>
<paragraph>
cell 3
<entry>
<paragraph>
cell 4
<system_message level="3" line="6" source="test data" type="ERROR">
<paragraph>
Unexpected indentation.
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<block_quote>
<paragraph>
Unexpected indent and no blank line after table.
"""],
["""\
+--------------+-------------+
| A bad table. | |
+--------------+ |
| Cells must be rectangles. |
+----------------------------+
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
Malformed table; parse incomplete.
<literal_block xml:space="preserve">
+--------------+-------------+
| A bad table. | |
+--------------+ |
| Cells must be rectangles. |
+----------------------------+
"""],
["""\
+------------------------------+
| This table contains another. |
| |
| +-------------------------+ |
| | A table within a table. | |
| +-------------------------+ |
+------------------------------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="30">
<tbody>
<row>
<entry>
<paragraph>
This table contains another.
<table>
<tgroup cols="1">
<colspec colwidth="25">
<tbody>
<row>
<entry>
<paragraph>
A table within a table.
"""],
["""\
+------------------+--------+
| A simple table | |
+------------------+--------+
| with empty cells | |
+------------------+--------+
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="18">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<row>
<entry>
<paragraph>
with empty cells
<entry>
"""],
[("""\
+------------------------------------------------------------------------------+
| .. include:: |
%s
+------------------------------------------------------------------------------+
| (The first cell of this table may expand |
| to accommodate long filesystem paths.) |
+------------------------------------------------------------------------------+
""") % ('\n'.join(['| %-70s |' % include2[part * 70 : (part + 1) * 70]
for part in range(len(include2) // 70 + 1)])),
"""\
<document source="test data">
<table>
<tgroup cols="1">
<colspec colwidth="78">
<tbody>
<row>
<entry>
<paragraph>
Here are some paragraphs
that can appear at any level.
<paragraph>
This file (include2.txt) is used by
<literal>
test_include.py
.
<row>
<entry>
<paragraph>
(The first cell of this table may expand
to accommodate long filesystem paths.)
"""],
[("""\
Something before.
+------------------------------------------------------------------------------+
| .. include:: |
%s
+------------------------------------------------------------------------------+
Something afterwards.
And more.
""") % ('\n'.join(['| %-70s |' % include2[part * 70 : (part + 1) * 70]
for part in range(len(include2) // 70 + 1)])),
"""\
<document source="test data">
<paragraph>
Something before.
<table>
<tgroup cols="1">
<colspec colwidth="78">
<tbody>
<row>
<entry>
<paragraph>
Here are some paragraphs
that can appear at any level.
<paragraph>
This file (include2.txt) is used by
<literal>
test_include.py
.
<paragraph>
Something afterwards.
<paragraph>
And more.
"""],
]
totest['simple_tables'] = [
["""\
============ ============
A table with two columns.
============ ============
Paragraph.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="12">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns.
<paragraph>
Paragraph.
"""],
["""\
============ ============
A table with two columns
and two rows.
============ ============
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="12">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns
<row>
<entry>
<paragraph>
and
<entry>
<paragraph>
two rows.
"""],
["""\
============ ==============
A table with two columns,
two rows, and a column span.
============================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="14">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
two columns,
<row>
<entry morecols="1">
<paragraph>
two rows, and a column span.
"""],
["""\
== =========== ===========
1 A table with three rows,
-- ------------------------
2 and three columns.
3 First and third rows
contain column spans.
This row is a multi-line row, and overflows to the right.
-- ------------------------
4 One last row.
== =========== ===========
""",
"""\
<document source="test data">
<table>
<tgroup cols="3">
<colspec colwidth="2">
<colspec colwidth="11">
<colspec colwidth="44">
<tbody>
<row>
<entry>
<paragraph>
1
<entry morecols="1">
<paragraph>
A table with three rows,
<row>
<entry>
<paragraph>
2
<entry>
<paragraph>
and three
<entry>
<paragraph>
columns.
<row>
<entry>
<paragraph>
3
<entry morecols="1">
<paragraph>
First and third rows
contain column spans.
<paragraph>
This row is a multi-line row, and overflows to the right.
<row>
<entry>
<paragraph>
4
<entry>
<paragraph>
One last
<entry>
<paragraph>
row.
"""],
["""\
======= ========= ========
A table with three columns.
================== ========
""",
"""\
<document source="test data">
<table>
<tgroup cols="3">
<colspec colwidth="7">
<colspec colwidth="9">
<colspec colwidth="8">
<tbody>
<row>
<entry morecols="1">
<paragraph>
A table with three
<entry>
<paragraph>
columns.
"""],
["""\
============== ======
A simple table with
no bottom border
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
No bottom table border found.
<literal_block xml:space="preserve">
============== ======
A simple table with
no bottom border
"""],
["""\
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
No blank line after table.
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
No bottom table border found or no blank line after table bottom.
<literal_block xml:space="preserve">
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
<system_message level="2" line="5" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<paragraph>
No blank line after table.
"""],
["""\
============== ======
A simple table cell 2
============== ======
cell 3 cell 4
============== ======
No blank line after table.
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="6">
<thead>
<row>
<entry>
<paragraph>
A simple table
<entry>
<paragraph>
cell 2
<tbody>
<row>
<entry>
<paragraph>
cell 3
<entry>
<paragraph>
cell 4
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<paragraph>
No blank line after table.
"""],
["""\
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
Unexpected indent and no blank line after table.
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
No bottom table border found or no blank line after table bottom.
<literal_block xml:space="preserve">
============== ======
A simple table cell 2
cell 3 cell 4
============== ======
<system_message level="2" line="5" source="test data" type="WARNING">
<paragraph>
Blank line required after table.
<block_quote>
<paragraph>
Unexpected indent and no blank line after table.
"""],
["""\
============== ======
A bad table cell 2
cell 3 cell 4
============ ========
""",
"""\
<document source="test data">
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Malformed table.
Column span alignment problem in table line 4.
<literal_block xml:space="preserve">
============== ======
A bad table cell 2
cell 3 cell 4
============ ========
"""],
["""\
======== =========
A bad table cell 2
cell 3 cell 4
======== =========
""",
"""\
<document source="test data">
<system_message level="3" line="2" source="test data" type="ERROR">
<paragraph>
Malformed table.
Text in column margin in table line 2.
<literal_block xml:space="preserve">
======== =========
A bad table cell 2
cell 3 cell 4
======== =========
"""],
["""\
== ============================
1 This table contains another.
2 ======= ====== ========
A table within a table.
======= ====== ========
The outer table does have to
have at least two columns
though.
== ============================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="2">
<colspec colwidth="28">
<tbody>
<row>
<entry>
<paragraph>
1
<entry>
<paragraph>
This table contains another.
<row>
<entry>
<paragraph>
2
<entry>
<table>
<tgroup cols="3">
<colspec colwidth="7">
<colspec colwidth="6">
<colspec colwidth="8">
<tbody>
<row>
<entry>
<paragraph>
A table
<entry>
<paragraph>
within
<entry>
<paragraph>
a table.
<paragraph>
The outer table does have to
have at least two columns
though.
"""],
["""\
================ ======
A simple table
with empty cells
================ ======
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="16">
<colspec colwidth="6">
<tbody>
<row>
<entry>
<paragraph>
A simple table
<entry>
<row>
<entry>
<paragraph>
with empty cells
<entry>
"""],
["""\
============== ========
A table with
============== ========
centered cells.
============== ========
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="14">
<colspec colwidth="8">
<thead>
<row>
<entry>
<paragraph>
A table
<entry>
<paragraph>
with
<tbody>
<row>
<entry>
<paragraph>
centered
<entry>
<paragraph>
cells.
"""],
["""\
============== ======
A simple table this text extends to the right
cell 3 the bottom border below is too long
============== ========
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Malformed table.
Bottom/header table border does not match top border.
<literal_block xml:space="preserve">
============== ======
A simple table this text extends to the right
cell 3 the bottom border below is too long
============== ========
"""],
["""\
============ =================
A table with row separators.
------------ -----------------
Blank line before.
------------ -----------------
Blank lines before and after.
------------ -----------------
Blank line after.
============ =================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="17">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
row separators.
<row>
<entry>
<paragraph>
Blank line
<entry>
<paragraph>
before.
<row>
<entry>
<paragraph>
Blank lines
<entry>
<paragraph>
before and after.
<row>
<entry>
<paragraph>
Blank line
<entry>
<paragraph>
after.
"""],
["""\
============ ====================
A table with many row separators.
------------ --------------------
------------ --------------------
------------ --------------------
============ ====================
""",
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="12">
<colspec colwidth="20">
<tbody>
<row>
<entry>
<paragraph>
A table with
<entry>
<paragraph>
many row separators.
<row>
<entry>
<entry>
<row>
<entry>
<entry>
<row>
<entry>
<entry>
"""],
["""\
== =========== ===========
1 Span columns 2 & 3
-- ------------------------
2 Span columns 2 & 3
------------------------
3
== =========== ===========
== =========== ===========
1 Span cols 1&2 but not 3
--------------- -----------
2 Span cols 1&2 but not 3
---------------
3 no spans here
== =========== ===========
== =========== ===========
1 Not a span Not a span
----------- -----------
2
== =========== ===========
""",
"""\
<document source="test data">
<system_message level="3" line="4" source="test data" type="ERROR">
<paragraph>
Malformed table.
Text in column margin in table line 4.
<literal_block xml:space="preserve">
== =========== ===========
1 Span columns 2 & 3
-- ------------------------
2 Span columns 2 & 3
------------------------
3
== =========== ===========
<system_message level="3" line="13" source="test data" type="ERROR">
<paragraph>
Malformed table.
Column span incomplete in table line 5.
<literal_block xml:space="preserve">
== =========== ===========
1 Span cols 1&2 but not 3
--------------- -----------
2 Span cols 1&2 but not 3
---------------
3 no spans here
== =========== ===========
<table>
<tgroup cols="3">
<colspec colwidth="2">
<colspec colwidth="11">
<colspec colwidth="11">
<tbody>
<row>
<entry>
<paragraph>
1
<entry>
<system_message level="4" line="19" source="test data" type="SEVERE">
<paragraph>
Unexpected section title.
<literal_block xml:space="preserve">
Not a span
-----------
<entry>
<system_message level="4" line="19" source="test data" type="SEVERE">
<paragraph>
Unexpected section title.
<literal_block xml:space="preserve">
Not a span
-----------
<row>
<entry>
<paragraph>
2
<entry>
<entry>
"""],
["""\
========= =====================================================================
Inclusion .. include::
%s
Note The first row of this table may expand
to accommodate long filesystem paths.
========= =====================================================================
""" % ('\n'.join([' %-65s' % include2[part * 65 : (part + 1) * 65]
for part in range(len(include2) // 65 + 1)])),
"""\
<document source="test data">
<table>
<tgroup cols="2">
<colspec colwidth="9">
<colspec colwidth="69">
<tbody>
<row>
<entry>
<paragraph>
Inclusion
<entry>
<paragraph>
Here are some paragraphs
that can appear at any level.
<paragraph>
This file (include2.txt) is used by
<literal>
test_include.py
.
<row>
<entry>
<paragraph>
Note
<entry>
<paragraph>
The first row of this table may expand
to accommodate long filesystem paths.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 | 3,214,507,580,240,908,300 | 28.776013 | 93 | 0.311886 | false |
palmerjh/iEBE | EBE-Node/superMC/translate.py | 8 | 3447 | #! /usr/bin/env python
# This script translates a list of arguments into one value specified by a rule file. Usage:
# translate ruleFilename key1 key2 ...
# It prints out the value corresponds to [key1, key2, ...] from a dictionary read from ruleFilename.
# To see how the dictionary is generated, see readRules function.
from sys import exit, argv
def processOneLine(aLine, level_indicator="+", key_separator=":", commentSymbol="#"):
"""
Return [level, keys_list, value] list from string aLine.
    The level is indicated by how many successive level_indicators appear on the left; the keys and the value are separated by key_separator.
"""
# take care of comments:
if commentSymbol in aLine:
aLine = aLine[:aLine.index(commentSymbol)].strip();
# if it's an empty line:
aLine = aLine.strip()
if aLine=="": return []
# check if syntax is correct:
if key_separator not in aLine:
print("translate.processOneLine error: key-value separator "+key_separator+" not included in the line \n"+aLine)
exit(-1)
# get level
level = 0
for i in range(len(aLine)):
if aLine[i]==level_indicator:
level = level + 1
else:
aLine = aLine[i:]
break
# separate key and value
components = aLine.split(key_separator);
keys_list = [x.strip() for x in components[:-1]];
value = components[-1].strip();
# finally...
return [level, keys_list, value]
def readRules(buffer,level_indicator="+", key_separator=":", commentSymbol="#"):
"""
    Process the text buffer to get the rules used for translations, line by line. Each line is turned into one entry in a rule dictionary, which is then returned. For each line, the list of all strings between key_separators except the last one forms the key, and the last one is the value.
For example,
a : b: 1 # comments
will be translates into entry ["a","b"]:"1"
    To ease the pain of repeated common keys, a level_indicator can be used to indicate how many shared keys the current line inherits from previous lines. The number of level_indicators gives the number of keys the current line should inherit (starting from the left) from previous lines.
For example, if the text buffer looks like:
z : 1
+ a : 2
++ b : 3
+ d: 4
The rule dictionary will contain:
("z") : "1"
("z", "a") : 2
("z", "a", "b") : 3
("z", "d") : 4
Note that the following
z : 1
++ a : 2
will raise an error.
"""
D = {}
accumulated_keys = [];
for aLine in buffer:
tmp_result = processOneLine(aLine)
if not tmp_result: continue
level, keys, value = tmp_result
if level>len(accumulated_keys):
            print("translate.readRules error: too many "+level_indicator+" signs in the line\n"+aLine)
exit(-1)
else:
accumulated_keys = accumulated_keys[:level]
accumulated_keys.extend(keys)
D[tuple(accumulated_keys)] = value
return D
def translate(ruleFilename, keys_list):
"""
    Translate keys_list into the corresponding value given in the dictionary generated from ruleFilename using the readRules function.
"""
D = readRules(ruleFilename)
result = ""
for ii in range(len(keys_list)): result+=" "+(D[tuple(keys_list[:ii+1])])
return result
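# A minimal usage sketch of readRules/translate (illustrative only; the rule
# lines and keys below are hypothetical and not read from any real rule file).
def _demo_translate():
    rules = [
        "fruit : apple",
        "+ color : red      # inherits the 'fruit' key",
        "+ price : 1.5",
    ]
    # readRules(rules) yields {('fruit',): 'apple',
    #                          ('fruit', 'color'): 'red',
    #                          ('fruit', 'price'): '1.5'}
    print(translate(rules, ["fruit", "color"]))  # -> " apple red"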
if __name__ == '__main__':
if len(argv)<3:
print("Usage: translate ruleFilename key1 key2 ...")
exit(-1)
else:
        print(translate(open(argv[1]).readlines(),argv[2:]))
| gpl-3.0 | 5,394,030,001,868,859,000 | 34.536082 | 334 | 0.668988 | false |
amenonsen/ansible | lib/ansible/modules/storage/netapp/na_ontap_igroup_initiator.py | 21 | 6258 | #!/usr/bin/python
''' This is an Ansible module for ONTAP, to manage initiators in an Igroup
(c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: na_ontap_igroup_initiator
short_description: NetApp ONTAP igroup initiator configuration
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Add/Remove initiators from an igroup
options:
state:
description:
- Whether the specified initiator should exist or not in an igroup.
choices: ['present', 'absent']
default: present
names:
description:
- List of initiators to manage.
required: true
aliases:
- name
initiator_group:
description:
- Name of the initiator group to which the initiator belongs.
required: true
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = '''
- name: Add initiators to an igroup
na_ontap_igroup_initiator:
names: abc.test:def.com,def.test:efg.com
initiator_group: test_group
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Remove an initiator from an igroup
na_ontap_igroup_initiator:
state: absent
names: abc.test:def.com
initiator_group: test_group
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapIgroupInitiator(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
names=dict(required=True, type='list', aliases=['name']),
initiator_group=dict(required=True, type='str'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
def get_initiators(self):
"""
Get the existing list of initiators from an igroup
:rtype: list() or None
"""
igroup_info = netapp_utils.zapi.NaElement('igroup-get-iter')
attributes = dict(query={'initiator-group-info': {'initiator-group-name': self.parameters['initiator_group'],
'vserver': self.parameters['vserver']}})
igroup_info.translate_struct(attributes)
result, current = None, []
try:
result = self.server.invoke_successfully(igroup_info, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching igroup info %s: %s' % (self.parameters['initiator_group'],
to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
igroup_info = result.get_child_by_name('attributes-list').get_child_by_name('initiator-group-info')
if igroup_info.get_child_by_name('initiators') is not None:
current = [initiator['initiator-name'] for initiator in igroup_info['initiators'].get_children()]
return current
def modify_initiator(self, initiator_name, zapi):
"""
Add or remove an initiator to/from an igroup
"""
options = {'initiator-group-name': self.parameters['initiator_group'],
'initiator': initiator_name}
initiator_modify = netapp_utils.zapi.NaElement.create_node_with_children(zapi, **options)
try:
self.server.invoke_successfully(initiator_modify, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying igroup initiator %s: %s' % (initiator_name,
to_native(error)),
exception=traceback.format_exc())
def autosupport_log(self):
netapp_utils.ems_log_event("na_ontap_igroup_initiator", self.server)
def apply(self):
self.autosupport_log()
initiators = self.get_initiators()
for initiator in self.parameters['names']:
present = None
if initiator in initiators:
present = True
cd_action = self.na_helper.get_cd_action(present, self.parameters)
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
if cd_action == 'create':
self.modify_initiator(initiator, 'igroup-add')
elif cd_action == 'delete':
self.modify_initiator(initiator, 'igroup-remove')
self.module.exit_json(changed=self.na_helper.changed)
def main():
obj = NetAppOntapIgroupInitiator()
obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,394,123,362,881,007,400 | 33.196721 | 117 | 0.605944 | false |
efiop/dvc | dvc/repo/reproduce.py | 1 | 8232 | import logging
import typing
from functools import partial
from dvc.exceptions import DvcException, ReproductionError
from dvc.repo.scm_context import scm_context
from . import locked
if typing.TYPE_CHECKING:
from . import Repo
logger = logging.getLogger(__name__)
def _reproduce_stage(stage, **kwargs):
def _run_callback(repro_callback):
_dump_stage(stage)
_track_stage(stage)
repro_callback([stage])
checkpoint_func = kwargs.pop("checkpoint_func", None)
if stage.is_checkpoint:
if checkpoint_func:
kwargs["checkpoint_func"] = partial(_run_callback, checkpoint_func)
else:
raise DvcException(
"Checkpoint stages are not supported in 'dvc repro'. "
"Checkpoint stages must be reproduced with 'dvc exp run' "
"or 'dvc exp resume'."
)
if stage.frozen and not stage.is_import:
logger.warning(
"{} is frozen. Its dependencies are"
" not going to be reproduced.".format(stage)
)
stage = stage.reproduce(**kwargs)
if not stage:
return []
if not kwargs.get("dry", False):
track = checkpoint_func is not None
_dump_stage(stage)
if track:
_track_stage(stage)
return [stage]
def _dump_stage(stage):
from ..dvcfile import Dvcfile
dvcfile = Dvcfile(stage.repo, stage.path)
dvcfile.dump(stage, update_pipeline=False)
def _track_stage(stage):
from dvc.utils import relpath
stage.repo.scm.track_file(stage.dvcfile.relpath)
for dep in stage.deps:
if (
not dep.use_scm_ignore
and dep.is_in_repo
and not stage.repo.repo_fs.isdvc(dep.path_info)
):
stage.repo.scm.track_file(relpath(dep.path_info))
for out in stage.outs:
if not out.use_scm_ignore and out.is_in_repo:
stage.repo.scm.track_file(relpath(out.path_info))
if out.live:
from dvc.repo.live import summary_path_info
summary = summary_path_info(out)
if summary:
stage.repo.scm.track_file(relpath(summary))
stage.repo.scm.track_changed_files()
@locked
@scm_context
def reproduce(
self: "Repo",
targets=None,
recursive=False,
pipeline=False,
all_pipelines=False,
**kwargs,
):
from .graph import get_pipeline, get_pipelines
glob = kwargs.pop("glob", False)
accept_group = not glob
if isinstance(targets, str):
targets = [targets]
if not all_pipelines and not targets:
from dvc.dvcfile import PIPELINE_FILE
targets = [PIPELINE_FILE]
interactive = kwargs.get("interactive", False)
if not interactive:
kwargs["interactive"] = self.config["core"].get("interactive", False)
stages = set()
if pipeline or all_pipelines:
pipelines = get_pipelines(self.graph)
if all_pipelines:
used_pipelines = pipelines
else:
used_pipelines = []
for target in targets:
stage = self.stage.get_target(target)
used_pipelines.append(get_pipeline(pipelines, stage))
for pline in used_pipelines:
for stage in pline:
if pline.in_degree(stage) == 0:
stages.add(stage)
else:
for target in targets:
stages.update(
self.stage.collect(
target,
recursive=recursive,
accept_group=accept_group,
glob=glob,
)
)
return _reproduce_stages(self.graph, list(stages), **kwargs)
def _reproduce_stages(
G, stages, downstream=False, single_item=False, on_unchanged=None, **kwargs
):
r"""Derive the evaluation of the given node for the given graph.
When you _reproduce a stage_, you want to _evaluate the descendants_
    to know if it makes sense to _recompute_ it. A post-ordered search
    will give us an ordered list of the nodes we want.
For example, let's say that we have the following pipeline:
E
/ \
D F
/ \ \
B C G
\ /
A
The derived evaluation of D would be: [A, B, C, D]
    In case the `downstream` option is specified, the desired effect
is to derive the evaluation starting from the given stage up to the
ancestors. However, the `networkx.ancestors` returns a set, without
any guarantee of any order, so we are going to reverse the graph and
use a reverse post-ordered search using the given stage as a starting
point.
E A
/ \ / \
D F B C G
/ \ \ --- reverse --> \ / /
B C G D F
\ / \ /
A E
The derived evaluation of _downstream_ B would be: [B, D, E]
"""
steps = _get_steps(G, stages, downstream, single_item)
force_downstream = kwargs.pop("force_downstream", False)
result = []
unchanged = []
# `ret` is used to add a cosmetic newline.
ret = []
checkpoint_func = kwargs.pop("checkpoint_func", None)
for stage in steps:
if ret:
logger.info("")
if checkpoint_func:
kwargs["checkpoint_func"] = partial(
_repro_callback, checkpoint_func, unchanged
)
from dvc.stage.monitor import CheckpointKilledError
try:
ret = _reproduce_stage(stage, **kwargs)
if len(ret) == 0:
unchanged.extend([stage])
elif force_downstream:
# NOTE: we are walking our pipeline from the top to the
# bottom. If one stage is changed, it will be reproduced,
# which tells us that we should force reproducing all of
# the other stages down below, even if their direct
# dependencies didn't change.
kwargs["force"] = True
if ret:
result.extend(ret)
except CheckpointKilledError:
raise
except Exception as exc:
raise ReproductionError(stage.relpath) from exc
if on_unchanged is not None:
on_unchanged(unchanged)
return result
def _get_steps(G, stages, downstream, single_item):
import networkx as nx
active = G.copy()
if not single_item:
# NOTE: frozen stages don't matter for single_item
for stage in G:
if stage.frozen:
# NOTE: disconnect frozen stage from its dependencies
active.remove_edges_from(G.out_edges(stage))
all_pipelines = []
for stage in stages:
if downstream:
# NOTE (py3 only):
# Python's `deepcopy` defaults to pickle/unpickle the object.
# Stages are complex objects (with references to `repo`,
# `outs`, and `deps`) that cause struggles when you try
# to serialize them. We need to create a copy of the graph
# itself, and then reverse it, instead of using
# graph.reverse() directly because it calls `deepcopy`
# underneath -- unless copy=False is specified.
nodes = nx.dfs_postorder_nodes(active.reverse(copy=False), stage)
all_pipelines += reversed(list(nodes))
else:
all_pipelines += nx.dfs_postorder_nodes(active, stage)
steps = []
for stage in all_pipelines:
if stage not in steps:
# NOTE: order of steps still matters for single_item
if single_item and stage not in stages:
continue
steps.append(stage)
return steps
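# A minimal sketch (illustrative only, not used by dvc itself) of the post-order
# walks described in the _reproduce_stages docstring, using plain strings in
# place of Stage objects. Node names mirror the ASCII diagram above; the exact
# output order assumes this edge insertion order.
def _postorder_demo():
    import networkx as nx

    g = nx.DiGraph()
    # edges point from a stage to the stages it depends on, as in dvc's graph
    g.add_edges_from([
        ("E", "D"), ("E", "F"), ("D", "B"), ("D", "C"),
        ("B", "A"), ("C", "A"), ("F", "G"),
    ])
    # evaluating D: dependencies come first, D itself comes last
    print(list(nx.dfs_postorder_nodes(g, "D")))  # -> ['A', 'B', 'C', 'D']
    # downstream of B: reverse the graph, walk from B, then reverse the order
    nodes = nx.dfs_postorder_nodes(g.reverse(copy=False), "B")
    print(list(reversed(list(nodes))))  # -> ['B', 'D', 'E']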
def _repro_callback(experiments_callback, unchanged, stages):
experiments_callback(unchanged, stages)
| apache-2.0 | 2,688,902,925,146,140,000 | 30.661538 | 79 | 0.557094 | false |
Marketing1by1/petl | petl/test/io/test_csv.py | 3 | 7619 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from tempfile import NamedTemporaryFile
import gzip
import os
import logging
from petl.compat import PY2
from petl.test.helpers import ieq, eq_
from petl.io.csv import fromcsv, fromtsv, tocsv, appendcsv, totsv, appendtsv
logger = logging.getLogger(__name__)
debug = logger.debug
def test_fromcsv():
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
actual = fromcsv(f.name, encoding='ascii')
debug(actual)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromcsv_lineterminators():
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
for lt in b'\r', b'\n', b'\r\n':
debug(repr(lt))
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(lt.join(data))
f.close()
with open(f.name, 'rb') as g:
debug(repr(g.read()))
actual = fromcsv(f.name, encoding='ascii')
debug(actual)
ieq(expect, actual)
def test_fromcsv_quoted():
import csv
data = [b'"foo","bar"',
b'"a",1',
b'"b",2',
b'"c",2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
actual = fromcsv(f.name, quoting=csv.QUOTE_NONNUMERIC)
debug(actual)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromtsv():
data = [b'foo\tbar',
b'a\t1',
b'b\t2',
b'c\t2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
actual = fromtsv(f.name, encoding='ascii')
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromcsv_gz():
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
# '\r' not supported in PY2 because universal newline mode is
# not supported by gzip module
if PY2:
lts = b'\n', b'\r\n'
else:
lts = b'\r', b'\n', b'\r\n'
for lt in lts:
f = NamedTemporaryFile(delete=False)
f.close()
fn = f.name + '.gz'
os.rename(f.name, fn)
fz = gzip.open(fn, 'wb')
fz.write(lt.join(data))
fz.close()
actual = fromcsv(fn, encoding='ascii')
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_tocsv_appendcsv():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
f.close()
tocsv(table, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appendcsv(table2, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2',
b'd,7',
b'e,9',
b'f,1']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
def test_tocsv_noheader():
# check explicit no header
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
tocsv(table, f.name, encoding='ascii', lineterminator='\n',
write_header=False)
# check what it did
with open(f.name, 'rb') as o:
data = [b'a,1',
b'b,2',
b'c,2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
def test_totsv_appendtsv():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
f.close()
totsv(table, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo\tbar',
b'a\t1',
b'b\t2',
b'c\t2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appendtsv(table2, f.name, encoding='ascii', lineterminator='\n')
# check what it did
with open(f.name, 'rb') as o:
data = [b'foo\tbar',
b'a\t1',
b'b\t2',
b'c\t2',
b'd\t7',
b'e\t9',
b'f\t1']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
def test_tocsv_appendcsv_gz():
# exercise function
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
f = NamedTemporaryFile(delete=False)
fn = f.name + '.gz'
f.close()
tocsv(table, fn, encoding='ascii', lineterminator='\n')
# check what it did
o = gzip.open(fn, 'rb')
try:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
finally:
o.close()
# check appending
table2 = (('foo', 'bar'),
('d', 7),
('e', 9),
('f', 1))
appendcsv(table2, fn, encoding='ascii', lineterminator='\n')
# check what it did
o = gzip.open(fn, 'rb')
try:
data = [b'foo,bar',
b'a,1',
b'b,2',
b'c,2',
b'd,7',
b'e,9',
b'f,1']
# don't forget final terminator
expect = b'\n'.join(data) + b'\n'
actual = o.read()
eq_(expect, actual)
finally:
o.close()
def test_fromcsv_header():
header = ['foo', 'bar']
data = [b'a,1',
b'b,2',
b'c,2']
f = NamedTemporaryFile(mode='wb', delete=False)
f.write(b'\n'.join(data))
f.close()
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
actual = fromcsv(f.name, encoding='ascii', header=header)
debug(actual)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice | mit | -6,578,390,922,451,342,000 | 23.501608 | 76 | 0.446384 | false |
dogless/airavata | airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/samples/createExperiment.py | 6 | 2463 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, ConfigParser
sys.path.append('../lib')
from apache.airavata.api import Airavata
from apache.airavata.api.ttypes import *
from apache.airavata.model.workspace.experiment.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
# Read Airavata Client properties
airavataConfig = ConfigParser.RawConfigParser()
airavataConfig.read('../conf/airavata-client.properties')
# Create a socket to the Airavata Server
transport = TSocket.TSocket(airavataConfig.get('AiravataServer', 'host'), airavataConfig.get('AiravataServer', 'port'))
# Use Buffered Protocol to speedup over raw sockets
transport = TTransport.TBufferedTransport(transport)
# Airavata currently uses Binary Protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a Airavata client to use the protocol encoder
airavataClient = Airavata.Client(protocol)
# Connect to Airavata Server
transport.open()
#Create a experiment
experiment = Experiment()
experiment.userName = "smarru"
experiment.name = "cli-test-experiment"
experiment.description = "experiment to test python cli"
experiment.applicationId = "Echo_b22f2303-a574-43ef-a6f2-ab8e64e2d0a2"
#experiment.experimentInputs
print 'Created Experiment with Id:', airavataClient.createExperiment("sdsc", experiment)
print 'Airavata Server Version is:', airavataClient.getAPIVersion()
# Close Connection to Airavata Server
transport.close()
except Thrift.TException, tx:
print '%s' % (tx.message)
| apache-2.0 | 2,160,463,855,447,133,200 | 32.283784 | 123 | 0.760049 | false |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/graphics/charts/spider.py | 34 | 16057 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/spider.py
# spider chart, also known as radar chart
__version__=''' $Id$ '''
__doc__="""Spider Chart
Normal use shows variation of 5-10 parameters against some 'norm' or target.
When there is more than one series, place the series with the largest
numbers first, as it will be overdrawn by each successive one.
"""
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors, isNumberOrNone,\
isNoneOrListOfNoneOrStrings, isTextAnchor,\
isNoneOrListOfNoneOrNumbers, isBoxAnchor,\
isStringOrNone, isStringOrNone, EitherOr,\
isCallable
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, PolyLine, Ellipse, \
Wedge, String, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
from reportlab.graphics.charts.piecharts import WedgeLabel
from reportlab.graphics.widgets.markers import makeMarker, uSymbol2Symbol, isSymbol
class StrandProperty(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber,desc='width'),
fillColor = AttrMapValue(isColorOrNone,desc='filling color'),
strokeColor = AttrMapValue(isColorOrNone,desc='stroke color'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone,desc='dashing pattern, e.g. (3,2)'),
symbol = AttrMapValue(EitherOr((isStringOrNone,isSymbol)), desc='Widget placed at data points.',advancedUsage=1),
symbolSize= AttrMapValue(isNumber, desc='Symbol size.',advancedUsage=1),
name = AttrMapValue(isStringOrNone, desc='Name of the strand.'),
)
def __init__(self):
self.strokeWidth = 1
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.symbol = None
self.symbolSize = 5
self.name = None
class SpokeProperty(PropHolder):
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber,desc='width'),
fillColor = AttrMapValue(isColorOrNone,desc='filling color'),
strokeColor = AttrMapValue(isColorOrNone,desc='stroke color'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone,desc='dashing pattern, e.g. (2,1)'),
labelRadius = AttrMapValue(isNumber,desc='label radius',advancedUsage=1),
visible = AttrMapValue(isBoolean,desc="True if the spoke line is to be drawn"),
)
def __init__(self,**kw):
self.strokeWidth = 0.5
self.fillColor = None
self.strokeColor = STATE_DEFAULTS["strokeColor"]
self.strokeDashArray = STATE_DEFAULTS["strokeDashArray"]
self.visible = 1
self.labelRadius = 1.05
class SpokeLabel(WedgeLabel):
def __init__(self,**kw):
WedgeLabel.__init__(self,**kw)
if '_text' not in list(kw.keys()): self._text = ''
class StrandLabel(SpokeLabel):
_attrMap = AttrMap(BASE=SpokeLabel,
format = AttrMapValue(EitherOr((isStringOrNone,isCallable)),desc="Format for the label"),
dR = AttrMapValue(isNumberOrNone,desc="radial shift for label"),
)
def __init__(self,**kw):
self.format = ''
self.dR = 0
SpokeLabel.__init__(self,**kw)
def _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty):
L = labelClass()
L._text = text
L.x = cx + radius*car
L.y = cy + radius*sar
L._pmv = angle*180/pi
L.boxAnchor = sty.boxAnchor
L.dx = sty.dx
L.dy = sty.dy
L.angle = sty.angle
L.boxAnchor = sty.boxAnchor
L.boxStrokeColor = sty.boxStrokeColor
L.boxStrokeWidth = sty.boxStrokeWidth
L.boxFillColor = sty.boxFillColor
L.strokeColor = sty.strokeColor
L.strokeWidth = sty.strokeWidth
L.leading = sty.leading
L.width = sty.width
L.maxWidth = sty.maxWidth
L.height = sty.height
L.textAnchor = sty.textAnchor
L.visible = sty.visible
L.topPadding = sty.topPadding
L.leftPadding = sty.leftPadding
L.rightPadding = sty.rightPadding
L.bottomPadding = sty.bottomPadding
L.fontName = sty.fontName
L.fontSize = sty.fontSize
L.fillColor = sty.fillColor
return L
class SpiderChart(PlotArea):
_attrMap = AttrMap(BASE=PlotArea,
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
direction = AttrMapValue( OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
strands = AttrMapValue(None, desc="collection of strand descriptor objects"),
spokes = AttrMapValue(None, desc="collection of spoke descriptor objects"),
strandLabels = AttrMapValue(None, desc="collection of strand label descriptor objects"),
spokeLabels = AttrMapValue(None, desc="collection of spoke label descriptor objects"),
)
def makeSwatchSample(self, rowNo, x, y, width, height):
baseStyle = self.strands
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
strokeWidth = getattr(style, 'strokeWidth', getattr(baseStyle, 'strokeWidth',0))
symbol = getattr(style, 'symbol', getattr(baseStyle, 'symbol',None))
ym = y+height/2.0
if fillColor is None and strokeColor is not None and strokeWidth>0:
bg = Line(x,ym,x+width,ym,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray)
elif fillColor is not None:
bg = Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray,fillColor=fillColor)
else:
bg = None
if symbol:
            symbol = uSymbol2Symbol(symbol,x+width/2.,ym,fillColor or strokeColor)
if bg:
g = Group()
g.add(bg)
g.add(symbol)
return g
return symbol or bg
def getSeriesName(self,i,default=None):
'''return series name i or default'''
return _objStr(getattr(self.strands[i],'name',default))
def __init__(self):
PlotArea.__init__(self)
self.data = [[10,12,14,16,14,12], [6,8,10,12,9,11]]
self.labels = None # or list of strings
self.labels = ['a','b','c','d','e','f']
self.startAngle = 90
self.direction = "clockwise"
self.strands = TypedPropertyCollection(StrandProperty)
self.spokes = TypedPropertyCollection(SpokeProperty)
self.spokeLabels = TypedPropertyCollection(SpokeLabel)
self.spokeLabels._text = None
self.strandLabels = TypedPropertyCollection(StrandLabel)
self.x = 10
self.y = 10
self.width = 180
self.height = 180
def demo(self):
d = Drawing(200, 200)
d.add(SpiderChart())
return d
def normalizeData(self, outer = 0.0):
"""Turns data into normalized ones where each datum is < 1.0,
and 1.0 = maximum radius. Adds 10% at outside edge by default"""
data = self.data
assert min(list(map(min,data))) >=0, "Cannot do spider plots of negative numbers!"
norm = max(list(map(max,data)))
norm *= (1.0+outer)
if norm<1e-9: norm = 1.0
self._norm = norm
return [[e/norm for e in row] for row in data]
def _innerDrawLabel(self, sty, radius, cx, cy, angle, car, sar, labelClass=StrandLabel):
"Draw a label for a given item in the list."
fmt = sty.format
value = radius*self._norm
if not fmt:
text = None
elif isinstance(fmt,str):
if fmt == 'values':
text = sty._text
else:
text = fmt % value
elif hasattr(fmt,'__call__'):
text = fmt(value)
else:
raise ValueError("Unknown formatter type %s, expected string or function" % fmt)
if text:
dR = sty.dR
if dR:
radius += dR/self._radius
L = _setupLabel(labelClass, text, radius, cx, cy, angle, car, sar, sty)
if dR<0: L._anti = 1
else:
L = None
return L
def draw(self):
# normalize slice data
g = self.makeBackground() or Group()
xradius = self.width/2.0
yradius = self.height/2.0
self._radius = radius = min(xradius, yradius)
cx = self.x + xradius
cy = self.y + yradius
data = self.normalizeData()
self._seriesCount = len(data)
n = len(data[0])
#labels
if self.labels is None:
labels = [''] * n
else:
labels = self.labels
            #there's no point in raising errors for fewer labels than needed if
            #we silently create them all for the extreme case of no labels.
i = n-len(labels)
if i>0:
labels = labels + ['']*i
S = []
STRANDS = []
STRANDAREAS = []
syms = []
labs = []
csa = []
angle = self.startAngle*pi/180
direction = self.direction == "clockwise" and -1 or 1
angleBetween = direction*(2 * pi)/float(n)
spokes = self.spokes
spokeLabels = self.spokeLabels
for i in range(n):
car = cos(angle)*radius
sar = sin(angle)*radius
csa.append((car,sar,angle))
si = self.spokes[i]
if si.visible:
spoke = Line(cx, cy, cx + car, cy + sar, strokeWidth = si.strokeWidth, strokeColor=si.strokeColor, strokeDashArray=si.strokeDashArray)
S.append(spoke)
sli = spokeLabels[i]
text = sli._text
if not text: text = labels[i]
if text:
S.append(_setupLabel(WedgeLabel, text, si.labelRadius, cx, cy, angle, car, sar, sli))
angle += angleBetween
# now plot the polygons
rowIdx = 0
strands = self.strands
strandLabels = self.strandLabels
for row in data:
# series plot
rsty = strands[rowIdx]
points = []
car, sar = csa[-1][:2]
r = row[-1]
points.append(cx+car*r)
points.append(cy+sar*r)
for i in range(n):
car, sar, angle = csa[i]
r = row[i]
points.append(cx+car*r)
points.append(cy+sar*r)
L = self._innerDrawLabel(strandLabels[(rowIdx,i)], r, cx, cy, angle, car, sar, labelClass=StrandLabel)
if L: labs.append(L)
sty = strands[(rowIdx,i)]
uSymbol = sty.symbol
# put in a marker, if it needs one
if uSymbol:
s_x = cx+car*r
s_y = cy+sar*r
s_fillColor = sty.fillColor
s_strokeColor = sty.strokeColor
s_strokeWidth = sty.strokeWidth
s_angle = 0
s_size = sty.symbolSize
if type(uSymbol) is type(''):
symbol = makeMarker(uSymbol,
size = s_size,
x = s_x,
y = s_y,
fillColor = s_fillColor,
strokeColor = s_strokeColor,
strokeWidth = s_strokeWidth,
angle = s_angle,
)
else:
symbol = uSymbol2Symbol(uSymbol,s_x,s_y,s_fillColor)
for k,v in (('size', s_size), ('fillColor', s_fillColor),
('x', s_x), ('y', s_y),
('strokeColor',s_strokeColor), ('strokeWidth',s_strokeWidth),
('angle',s_angle),):
if getattr(symbol,k,None) is None:
try:
setattr(symbol,k,v)
except:
pass
syms.append(symbol)
# make up the 'strand'
if rsty.fillColor:
strand = Polygon(points)
strand.fillColor = rsty.fillColor
strand.strokeColor = None
strand.strokeWidth = 0
STRANDAREAS.append(strand)
if rsty.strokeColor and rsty.strokeWidth:
strand = PolyLine(points)
strand.strokeColor = rsty.strokeColor
strand.strokeWidth = rsty.strokeWidth
strand.strokeDashArray = rsty.strokeDashArray
STRANDS.append(strand)
rowIdx += 1
for s in (STRANDAREAS+STRANDS+syms+S+labs): g.add(s)
return g
def sample1():
"Make a simple spider chart"
d = Drawing(400, 400)
sp = SpiderChart()
sp.x = 50
sp.y = 50
sp.width = 300
sp.height = 300
sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
sp.labels = ['a','b','c','d','e','f']
sp.strands[0].strokeColor = colors.cornsilk
sp.strands[1].strokeColor = colors.cyan
sp.strands[2].strokeColor = colors.palegreen
sp.strands[0].fillColor = colors.cornsilk
sp.strands[1].fillColor = colors.cyan
sp.strands[2].fillColor = colors.palegreen
sp.spokes.strokeDashArray = (2,2)
d.add(sp)
return d
def sample2():
"Make a spider chart with markers, but no fill"
d = Drawing(400, 400)
sp = SpiderChart()
sp.x = 50
sp.y = 50
sp.width = 300
sp.height = 300
sp.data = [[10,12,14,16,14,12], [6,8,10,12,9,15],[7,8,17,4,12,8]]
sp.labels = ['U','V','W','X','Y','Z']
sp.strands.strokeWidth = 1
sp.strands[0].fillColor = colors.pink
sp.strands[1].fillColor = colors.lightblue
sp.strands[2].fillColor = colors.palegreen
sp.strands[0].strokeColor = colors.red
sp.strands[1].strokeColor = colors.blue
sp.strands[2].strokeColor = colors.green
sp.strands.symbol = "FilledDiamond"
sp.strands[1].symbol = makeMarker("Circle")
sp.strands[1].symbol.strokeWidth = 0.5
sp.strands[1].symbol.fillColor = colors.yellow
sp.strands.symbolSize = 6
sp.strandLabels[0,3]._text = 'special'
sp.strandLabels[0,1]._text = 'one'
sp.strandLabels[0,0]._text = 'zero'
sp.strandLabels[1,0]._text = 'Earth'
sp.strandLabels[2,2]._text = 'Mars'
sp.strandLabels.format = 'values'
sp.strandLabels.dR = -5
d.add(sp)
return d
if __name__=='__main__':
d = sample1()
from reportlab.graphics.renderPDF import drawToFile
drawToFile(d, 'spider.pdf')
d = sample2()
drawToFile(d, 'spider2.pdf')
| gpl-2.0 | 7,203,825,704,348,503,000 | 38.355392 | 150 | 0.57464 | false |
kartikshah1/Test | venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 111 | 2031 | import distutils.command.install_scripts as orig
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
from setuptools.command.easy_install import get_script_args
from setuptools.command.easy_install import sys_executable
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0o777-mask)
| mit | -6,497,713,646,227,108,000 | 38.057692 | 79 | 0.632201 | false |
angr/angr | angr/flirt/build_sig.py | 1 | 10360 | # pylint:disable=consider-using-with
from typing import List, Dict
import json
import subprocess
import argparse
import tempfile
import os
import itertools
from collections import defaultdict
import angr
UNIQUE_STRING_COUNT = 20
# strings longer than MAX_UNIQUE_STRING_LEN will be truncated
MAX_UNIQUE_STRING_LEN = 70
def get_basic_info(ar_path: str) -> Dict[str,str]:
"""
Get basic information of the archive file.
"""
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
# Load arch and OS information from the first .o file
o_files = [ f for f in os.listdir(".") if f.endswith(".o") ]
if o_files:
proj = angr.Project(o_files[0], auto_load_libs=False)
arch_name = proj.arch.name.lower()
os_name = proj.simos.name.lower()
os.chdir(cwd)
return {
'arch': arch_name,
'platform': os_name,
}
def get_unique_strings(ar_path: str) -> List[str]:
"""
For Linux libraries, this method requires ar (from binutils), nm (from binutils), and strings.
"""
# get symbols
nm_output = subprocess.check_output(["nm", ar_path])
nm_lines = nm_output.decode("utf-8").split("\n")
symbols = set()
for nm_line in nm_lines:
symbol_types = "UuVvTtRrDdWwBbNn"
for symbol_type in symbol_types:
if f" {symbol_type} " in nm_line:
# parse it
symbol = nm_line[nm_line.find(f" {symbol_type}") + 3: ].strip(" ")
if "." in symbol:
symbols |= set(symbol.split("."))
else:
symbols.add(symbol)
break
# extract the archive file into a temporary directory
all_strings = set()
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
for filename in os.listdir("."):
if filename.endswith(".o"):
strings = subprocess.check_output(["strings", "-n", "8", filename])
strings = strings.decode("utf-8").split("\n")
non_symbol_strings = set()
for s in strings:
if s in symbols:
continue
if "." in s and any(subs in symbols for subs in s.split(".")):
continue
# C++ specific
if "::" in s:
continue
if "_" in s:
# make sure it's not a substring of any symbol
is_substring = False
for symbol in symbols:
if s in symbol:
is_substring = True
break
if is_substring:
continue
non_symbol_strings.add(s)
all_strings |= non_symbol_strings
os.chdir(cwd)
grouped_strings = defaultdict(set)
for s in all_strings:
grouped_strings[s[:5]].add(s)
sorted_strings = list(sorted(all_strings, key=len, reverse=True))
ctr = 0
picked = set()
unique_strings = [ ]
for s in sorted_strings:
if s[:5] in picked:
continue
unique_strings.append(s[:MAX_UNIQUE_STRING_LEN])
picked.add(s[:5])
ctr += 1
if ctr >= UNIQUE_STRING_COUNT:
break
return unique_strings
def run_pelf(pelf_path: str, ar_path: str, output_path: str):
subprocess.check_call([pelf_path, ar_path, output_path])
def run_sigmake(sigmake_path: str, sig_name: str, pat_path: str, sig_path: str):
if " " not in sig_name:
sig_name_arg = f"-n{sig_name}"
else:
sig_name_arg = f"-n\"{sig_name}\""
proc = subprocess.Popen([sigmake_path, sig_name_arg, pat_path, sig_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, stderr = proc.communicate()
if b"COLLISIONS:" in stderr:
return False
return True
def process_exc_file(exc_path: str):
"""
We are doing the stupidest thing possible: For each batch of conflicts, we pick the most likely
    result based on a set of predefined rules.
TODO: Add caller-callee-based de-duplication.
"""
with open(exc_path, "r") as f:
data = f.read()
lines = data.split("\n")
# parse groups
ctr = itertools.count()
idx = 0
groups = defaultdict(dict)
for line in lines:
if line.startswith(";"):
continue
if not line:
idx = next(ctr)
else:
# parse the function name
func_name = line[:line.index("\t")].strip(" ")
groups[idx][func_name] = line
# for each group, decide the one to keep
for idx in list(groups.keys()):
g = groups[idx]
if len(g) == 1:
# don't pick anything. This is a weird case that I don't understand
continue
if all(func_name.endswith(".cold") for func_name in g):
# .cold functions. doesn't matter what we pick
continue
non_cold_names = [ ]
for func_name in g:
if func_name.endswith(".cold"):
continue
non_cold_names.append(func_name)
# sort it
non_cold_names = list(sorted(non_cold_names, key=len))
# pick the top one
the_chosen_one = non_cold_names[0]
line = g[the_chosen_one]
g[the_chosen_one] = "+" + line
# output
with open(exc_path, "w") as f:
for g in groups.values():
for line in g.values():
f.write(line + "\n")
f.write("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ar_path", help="Path of the .a file to build signatures for")
parser.add_argument("sig_name", help="Name of the signature (a string inside the signature file)")
parser.add_argument("sig_path", help="File name of the generated signature")
parser.add_argument("--compiler", help="Name of the compiler (e.g., gcc, clang). It will be stored in the meta "
"data file.")
parser.add_argument("--compiler_version", help="Version of the compiler (e.g., 6). It will be stored in the meta "
"data file.")
# parser.add_argument("--platform", help="Name of the platform (e.g., windows/linux/macos). It will be stored in
# the meta data file.")
parser.add_argument("--os", help="Name of the operating system (e.g., ubuntu/debian). It will be stored in the "
"meta data file.")
parser.add_argument("--os_version", help="Version of the operating system (e.g., 20.04). It will be stored in the "
"meta data file.")
parser.add_argument("--pelf_path", help="Path of pelf")
parser.add_argument("--sigmake_path", help="Path of sigmake")
args = parser.parse_args()
if args.pelf_path:
pelf_path = args.pelf_path
elif "pelf_path" in os.environ:
pelf_path = os.environ['pelf_path']
else:
raise ValueError("pelf_path must be specified.")
if args.sigmake_path:
        sigmake_path = args.sigmake_path
elif "sigmake_path" in os.environ:
sigmake_path = os.environ['sigmake_path']
else:
raise ValueError("sigmake_path must be specified.")
compiler = args.compiler
if compiler:
compiler = compiler.lower()
compiler_version = args.compiler_version
if compiler_version:
compiler_version = compiler_version.lower()
os_name = args.os
if os_name:
os_name = os_name.lower()
os_version = args.os_version
if os_version:
os_version = os_version.lower()
    # Get basic information
basic_info = get_basic_info(args.ar_path)
# Get unique strings from the library
unique_strings = get_unique_strings(args.ar_path)
# Build necessary file paths
sig_path_basename = os.path.basename(args.sig_path)
if "." in sig_path_basename:
sig_dir = os.path.dirname(args.sig_path)
filename = sig_path_basename[:sig_path_basename.rfind(".")]
exc_path = os.path.join(
sig_dir,
filename + ".exc"
)
meta_path = os.path.join(
sig_dir,
filename + ".meta"
)
else:
exc_path = args.sig_path + ".exc"
meta_path = args.sig_path + ".meta"
if os.path.isfile(exc_path):
        # Remove the existing exc file (if there is one)
os.remove(exc_path)
# Make a temporary directory
with tempfile.TemporaryDirectory() as tmpdirname:
ar_path = args.ar_path
basename = os.path.basename(ar_path)
# sanitize basename since otherwise sigmake is not happy with it
if basename.endswith(".a"):
basename = basename[:-2]
basename = basename.replace("+", "plus")
# sanitize signame as well
sig_name = args.sig_name
sig_name = sig_name.replace("+", "plus")
pat_path = os.path.join(tmpdirname, basename + ".pat")
run_pelf(pelf_path, ar_path, pat_path)
has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
if has_collision:
process_exc_file(exc_path)
# run sigmake again
            has_collision = not run_sigmake(sigmake_path, sig_name, pat_path, args.sig_path)
assert not has_collision
with open(meta_path, "w") as f:
metadata = {
'unique_strings': unique_strings,
}
metadata.update(basic_info)
if compiler_version:
metadata['compiler_version'] = compiler_version
if compiler:
metadata['compiler'] = compiler
if os_name:
metadata['os'] = os_name
if os_version:
metadata['os_version'] = os_version
f.write(json.dumps(metadata, indent=2))
if __name__ == "__main__":
main()
| bsd-2-clause | 4,011,237,977,573,441,000 | 31.681388 | 119 | 0.551351 | false |
CydarLtd/ansible | test/runner/lib/cover.py | 26 | 7465 | """Code coverage utilities."""
from __future__ import absolute_import, print_function
import os
import re
from lib.target import (
walk_module_targets,
walk_compile_targets,
)
from lib.util import (
display,
ApplicationError,
EnvironmentConfig,
run_command,
common_environment,
)
from lib.executor import (
Delegate,
install_command_requirements,
)
COVERAGE_DIR = 'test/results/coverage'
COVERAGE_FILE = os.path.join(COVERAGE_DIR, 'coverage')
COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
def command_coverage_combine(args):
"""Patch paths in coverage files and merge into a single file.
:type args: CoverageConfig
:rtype: list[str]
"""
coverage = initialize_coverage(args)
modules = dict((t.module, t.path) for t in list(walk_module_targets()))
coverage_files = [os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR) if '=coverage.' in f]
ansible_path = os.path.abspath('lib/ansible/') + '/'
root_path = os.getcwd() + '/'
counter = 0
groups = {}
if args.all or args.stub:
sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
else:
sources = []
if args.stub:
groups['=stub'] = dict((source, set()) for source in sources)
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
original = coverage.CoverageData()
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
if os.path.getsize(coverage_file) == 0:
display.warning('Empty coverage file: %s' % coverage_file)
continue
try:
original.read_file(coverage_file)
except Exception as ex: # pylint: disable=locally-disabled, broad-except
display.error(str(ex))
continue
for filename in original.measured_files():
arcs = set(original.arcs(filename) or [])
if not arcs:
# This is most likely due to using an unsupported version of coverage.
display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
continue
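            # Remap paths recorded on the remote/temporary side back to the
            # local source tree: module zip payloads, ansible_module_* wrappers
            # and /root/ansible checkouts.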
if '/ansible_modlib.zip/ansible/' in filename:
new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif '/ansible_module_' in filename:
module = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
if module not in modules:
display.warning('Skipping coverage of unknown module: %s' % module)
continue
new_name = os.path.abspath(modules[module])
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search('^(/.*?)?/root/ansible/', filename):
new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
if group not in groups:
groups[group] = {}
arc_data = groups[group]
if filename not in arc_data:
arc_data[filename] = set()
arc_data[filename].update(arcs)
output_files = []
for group in sorted(groups):
arc_data = groups[group]
updated = coverage.CoverageData()
for filename in arc_data:
if not os.path.isfile(filename):
display.warning('Invalid coverage path: %s' % filename)
continue
updated.add_arcs({filename: list(arc_data[filename])})
if args.all:
updated.add_arcs(dict((source, []) for source in sources))
if not args.explain:
output_file = COVERAGE_FILE + group
updated.write_file(output_file)
output_files.append(output_file)
return sorted(output_files)
def command_coverage_report(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
if args.group_by or args.stub:
display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
run_command(args, env=env, cmd=['coverage', 'report'])
def command_coverage_html(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
dir_name = 'test/results/reports/%s' % os.path.basename(output_file)
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
run_command(args, env=env, cmd=['coverage', 'html', '-d', dir_name])
def command_coverage_xml(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
xml_name = 'test/results/reports/%s.xml' % os.path.basename(output_file)
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
run_command(args, env=env, cmd=['coverage', 'xml', '-o', xml_name])
def command_coverage_erase(args):
"""
:type args: CoverageConfig
"""
initialize_coverage(args)
for name in os.listdir(COVERAGE_DIR):
if not name.startswith('coverage') and '=coverage.' not in name:
continue
path = os.path.join(COVERAGE_DIR, name)
if not args.explain:
os.remove(path)
def initialize_coverage(args):
"""
:type args: CoverageConfig
:rtype: coverage
"""
if args.delegate:
raise Delegate()
if args.requirements:
install_command_requirements(args)
try:
import coverage
except ImportError:
coverage = None
if not coverage:
raise ApplicationError('You must install the "coverage" python module to use this command.')
return coverage
def get_coverage_group(args, coverage_file):
"""
:type args: CoverageConfig
:type coverage_file: str
:rtype: str
"""
parts = os.path.basename(coverage_file).split('=', 4)
if len(parts) != 5 or not parts[4].startswith('coverage.'):
return None
names = dict(
command=parts[0],
target=parts[1],
environment=parts[2],
version=parts[3],
)
group = ''
for part in COVERAGE_GROUPS:
if part in args.group_by:
group += '=%s' % names[part]
return group
class CoverageConfig(EnvironmentConfig):
"""Configuration for the coverage command."""
def __init__(self, args):
"""
:type args: any
"""
super(CoverageConfig, self).__init__(args, 'coverage')
self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: frozenset[str]
self.all = args.all if 'all' in args else False # type: bool
self.stub = args.stub if 'stub' in args else False # type: bool
| gpl-3.0 | -5,508,514,888,069,618,000 | 28.389764 | 123 | 0.591293 | false |
EdwardBeckett/fail2ban | fail2ban/client/jailsreader.py | 18 | 3062 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
from .configreader import ConfigReader
from .jailreader import JailReader
from ..helpers import getLogger
# Gets the instance of the logger.
logSys = getLogger(__name__)
class JailsReader(ConfigReader):
def __init__(self, force_enable=False, **kwargs):
"""
Parameters
----------
force_enable : bool, optional
Passed to JailReader to force enable the jails.
It is for internal use
"""
ConfigReader.__init__(self, **kwargs)
self.__jails = list()
self.__force_enable = force_enable
@property
def jails(self):
return self.__jails
def read(self):
self.__jails = list()
return ConfigReader.read(self, "jail")
def getOptions(self, section=None):
"""Reads configuration for jail(s) and adds enabled jails to __jails
"""
opts = []
self.__opts = ConfigReader.getOptions(self, "Definition", opts)
if section is None:
sections = self.sections()
else:
sections = [ section ]
# Get the options of all jails.
parse_status = True
for sec in sections:
if sec == 'INCLUDES':
continue
# use the cfg_share for filter/action caching and the same config for all
# jails (use_config=...), therefore don't read it here:
jail = JailReader(sec, force_enable=self.__force_enable,
share_config=self.share_config, use_config=self._cfg)
ret = jail.getOptions()
if ret:
if jail.isEnabled():
# We only add enabled jails
self.__jails.append(jail)
else:
logSys.error("Errors in jail %r. Skipping..." % sec)
parse_status = False
return parse_status
def convert(self, allow_no_files=False):
"""Convert read before __opts and jails to the commands stream
Parameters
----------
		allow_no_files : bool
		  Whether to allow log files to be missing entirely. Primarily
		  used for testing
"""
stream = list()
for opt in self.__opts:
if opt == "":
stream.append([])
# Convert jails
for jail in self.__jails:
stream.extend(jail.convert(allow_no_files=allow_no_files))
# Start jails
for jail in self.__jails:
stream.append(["start", jail.getName()])
return stream
| gpl-2.0 | 5,963,948,043,318,385,000 | 27.091743 | 81 | 0.6855 | false |
thinkopensolutions/geraldo | site/newsite/django_1_0/tests/regressiontests/mail/tests.py | 10 | 1404 | # coding: utf-8
r"""
# Tests for the django.core.mail.
>>> from django.core.mail import EmailMessage
>>> from django.utils.translation import ugettext_lazy
# Test normal ascii character case:
>>> email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
>>> message = email.message()
>>> message['Subject']
'Subject'
>>> message.get_payload()
'Content'
>>> message['From']
'[email protected]'
>>> message['To']
'[email protected]'
# Test multiple-recipient case
>>> email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]','[email protected]'])
>>> message = email.message()
>>> message['Subject']
'Subject'
>>> message.get_payload()
'Content'
>>> message['From']
'[email protected]'
>>> message['To']
'[email protected], [email protected]'
# Test for header injection
>>> email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
>>> message = email.message()
Traceback (most recent call last):
...
BadHeaderError: Header values can't contain newlines (got u'Subject\nInjection Test' for header 'Subject')
>>> email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]'])
>>> message = email.message()
Traceback (most recent call last):
...
BadHeaderError: Header values can't contain newlines (got u'Subject\nInjection Test' for header 'Subject')
"""
| lgpl-3.0 | -8,823,524,838,124,119,000 | 28.25 | 117 | 0.69302 | false |
tudorbarascu/QGIS | python/pyplugin_installer/__init__.py | 45 | 1403 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Borys Jurgiel'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Borys Jurgiel'
# import functions for easier access
from . import installer
from .installer import initPluginInstaller # NOQA
def instance():
if not installer.pluginInstaller:
installer.initPluginInstaller()
return installer.pluginInstaller
| gpl-2.0 | -2,400,248,435,529,018,400 | 35.921053 | 77 | 0.445474 | false |
tudorbarascu/QGIS | python/plugins/processing/algs/qgis/Relief.py | 15 | 6936 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Relief.py
---------------------
Date : December 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon, QColor
from qgis.analysis import QgsRelief
from qgis.core import (QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterFileDestination,
QgsRasterFileWriter,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ParameterReliefColors(QgsProcessingParameterDefinition):
def __init__(self, name='', description='', parent=None, optional=True):
super().__init__(name, description, None, optional)
self.parent = parent
self.setMetadata({'widget_wrapper': 'processing.algs.qgis.ui.ReliefColorsWidget.ReliefColorsWidgetWrapper'})
def type(self):
return 'relief_colors'
def clone(self):
return ParameterReliefColors(self.name(), self.description(), self.parent,
self.flags() & QgsProcessingParameterDefinition.FlagOptional)
@staticmethod
def valueToColors(value):
if value is None:
return None
if value == '':
return None
if isinstance(value, str):
return value.split(';')
else:
return ParameterReliefColors.colorsToString(value)
@staticmethod
def colorsToString(colors):
s = ''
for c in colors:
s += '{:f}, {:f}, {:d}, {:d}, {:d};'.format(c[0],
c[1],
c[2],
c[3],
c[4])
return s[:-1]
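    # The serialized form is a ';'-separated list of
    # "lower, upper, red, green, blue" entries, e.g. (hypothetical values):
    #   "0.000000, 255.000000, 2, 107, 15;255.000000, 500.000000, 115, 168, 4"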
class Relief(QgisAlgorithm):
INPUT = 'INPUT'
Z_FACTOR = 'Z_FACTOR'
AUTO_COLORS = 'AUTO_COLORS'
COLORS = 'COLORS'
OUTPUT = 'OUTPUT'
FREQUENCY_DISTRIBUTION = 'FREQUENCY_DISTRIBUTION'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'dem.png'))
def group(self):
return self.tr('Raster terrain analysis')
def groupId(self):
return 'rasterterrainanalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Elevation layer')))
self.addParameter(QgsProcessingParameterNumber(self.Z_FACTOR,
self.tr('Z factor'), type=QgsProcessingParameterNumber.Double,
minValue=0.00, defaultValue=1.0))
self.addParameter(QgsProcessingParameterBoolean(self.AUTO_COLORS,
self.tr('Generate relief classes automatically'),
defaultValue=False))
self.addParameter(ParameterReliefColors(self.COLORS,
self.tr('Relief colors'),
self.INPUT,
True))
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Relief')))
self.addParameter(QgsProcessingParameterFileDestination(self.FREQUENCY_DISTRIBUTION,
self.tr('Frequency distribution'),
'CSV files (*.csv)',
optional=True,
createByDefault=False))
def name(self):
return 'relief'
def displayName(self):
return self.tr('Relief')
def processAlgorithm(self, parameters, context, feedback):
inputFile = self.parameterAsRasterLayer(parameters, self.INPUT, context).source()
zFactor = self.parameterAsDouble(parameters, self.Z_FACTOR, context)
automaticColors = self.parameterAsBoolean(parameters, self.AUTO_COLORS, context)
outputFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
frequencyDistribution = self.parameterAsFileOutput(parameters, self.FREQUENCY_DISTRIBUTION, context)
outputFormat = QgsRasterFileWriter.driverForExtension(os.path.splitext(outputFile)[1])
relief = QgsRelief(inputFile, outputFile, outputFormat)
if automaticColors:
reliefColors = relief.calculateOptimizedReliefClasses()
else:
colors = ParameterReliefColors.valueToColors(parameters[self.COLORS])
if colors is None or len(colors) == 0:
raise QgsProcessingException(
self.tr('Specify relief colors or activate "Generate relief classes automatically" option.'))
reliefColors = []
for c in colors:
v = c.split(',')
color = QgsRelief.ReliefColor(QColor(int(v[2]), int(v[3]), int(v[4])),
float(v[0]),
float(v[1]))
reliefColors.append(color)
relief.setReliefColors(reliefColors)
relief.setZFactor(zFactor)
if frequencyDistribution:
relief.exportFrequencyDistributionToCsv(frequencyDistribution)
relief.processRaster(feedback)
return {self.OUTPUT: outputFile, self.FREQUENCY_DISTRIBUTION: frequencyDistribution}
| gpl-2.0 | -2,099,561,983,249,656,300 | 41.814815 | 117 | 0.512687 | false |
drewp/tahoe-lafs | src/allmydata/util/abbreviate.py | 1 | 2005 |
import re
HOUR = 3600
DAY = 24*3600
WEEK = 7*DAY
MONTH = 30*DAY
YEAR = 365*DAY
def abbreviate_time(s):
def _plural(count, unit):
count = int(count)
if count == 1:
return "%d %s" % (count, unit)
return "%d %ss" % (count, unit)
if s is None:
return "unknown"
if s < 120:
return _plural(s, "second")
if s < 3*HOUR:
return _plural(s/60, "minute")
if s < 2*DAY:
return _plural(s/HOUR, "hour")
if s < 2*MONTH:
return _plural(s/DAY, "day")
if s < 4*YEAR:
return _plural(s/MONTH, "month")
return _plural(s/YEAR, "year")
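# Quick illustration (not part of the original module); values follow from the
# thresholds above: abbreviate_time(90) -> "90 seconds",
# abbreviate_time(3*HOUR) -> "3 hours", abbreviate_time(None) -> "unknown".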
def abbreviate_space(s, SI=True):
if s is None:
return "unknown"
if SI:
U = 1000.0
isuffix = "B"
else:
U = 1024.0
isuffix = "iB"
def r(count, suffix):
return "%.2f %s%s" % (count, suffix, isuffix)
if s < 1024: # 1000-1023 get emitted as bytes, even in SI mode
return "%d B" % s
if s < U*U:
return r(s/U, "k")
if s < U*U*U:
return r(s/(U*U), "M")
if s < U*U*U*U:
return r(s/(U*U*U), "G")
if s < U*U*U*U*U:
return r(s/(U*U*U*U), "T")
return r(s/(U*U*U*U*U), "P")
def abbreviate_space_both(s):
return "(%s, %s)" % (abbreviate_space(s, True),
abbreviate_space(s, False))
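# For illustration (values computed from the code above, not original content):
#   abbreviate_space(1234567)      -> "1.23 MB"   (SI)
#   abbreviate_space_both(1234567) -> "(1.23 MB, 1.18 MiB)"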
def parse_abbreviated_size(s):
if s is None or s == "":
return None
m = re.match(r"^(\d+)([kKmMgG]?[iB]?[bB]?)$", s)
if not m:
raise ValueError("unparseable value %s" % s)
number, suffix = m.groups()
suffix = suffix.upper()
if suffix.endswith("B"):
suffix = suffix[:-1]
multiplier = {"": 1,
"I": 1,
"K": 1000,
"M": 1000 * 1000,
"G": 1000 * 1000 * 1000,
"KI": 1024,
"MI": 1024*1024,
"GI": 1024*1024*1024,
}[suffix]
return int(number) * multiplier
| gpl-2.0 | -7,455,435,370,564,310,000 | 25.038961 | 66 | 0.468828 | false |
afandria/sky_engine | third_party/ply/yacc.py | 465 | 128492 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might
# be considered good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# Compatibility
try:
MAXINT = sys.maxint
except AttributeError:
MAXINT = sys.maxsize
# Python 2.x/3.0 compatibility.
def load_ply_lex():
if sys.version_info[0] < 3:
import lex
else:
import ply.lex as lex
return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def debug(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
info = debug
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit]+" ..."
result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return "<%s @ 0x%x>" % (type(r).__name__,id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self): return self.type
def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
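# For example, in a (hypothetical) grammar rule such as:
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]
#
# p[1] and p[3] read the .value of the matched symbols, and assigning to p[0]
# sets the value carried by the resulting 'expr' symbol.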
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser= None
def __getitem__(self,n):
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
def __setitem__(self,n,v):
self.slice[n].value = v
def __getslice__(self,i,j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
def set_lineno(self,n,lineno):
self.slice[n].lineno = lineno
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
return startline,endline
def lexpos(self,n):
return getattr(self.slice[n],"lexpos",0)
def lexspan(self,n):
startpos = getattr(self.slice[n],"lexpos",0)
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
def error(self):
raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self,lrtab,errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
def errok(self):
self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug,int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# --! DEBUG
debug.info("PLY: PARSE DEBUG START")
# --! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = "$end"
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
# --! DEBUG
debug.debug('')
debug.debug('State : %s', state)
# --! DEBUG
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = "$end"
# --! DEBUG
debug.debug('Stack : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
# --! DEBUG
debug.debug("Action : Shift and goto state %s", t)
# --! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
# --! DEBUG
if plen:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
else:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
# --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n,"value",None)
# --! DEBUG
debug.info("Done : Returning %s", format_result(result))
debug.info("PLY: PARSE DEBUG END")
# --! DEBUG
return result
if t == None:
# --! DEBUG
debug.error('Error : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == "$end":
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != "$end":
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == "$end":
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = [ ]
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
self.str = "%s -> <empty>" % self.name
def __str__(self):
return self.str
def __repr__(self):
return "Production("+str(self)+")"
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self,index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
p = LRItem(self,n)
# Precompute the list of productions immediately following. Hack. Remove later
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError,KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
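# Illustrative sketch (not part of the original module): constructing a
# Production by hand, assuming the hypothetical grammar rule 'expr : expr PLUS term'.
# Grammar.add_production() normally does this for you.
#
#   p = Production(1, 'expr', ['expr', 'PLUS', 'term'], ('right', 0))
#   str(p)      # 'expr -> expr PLUS term'
#   len(p)      # 3
#   p.usyms     # ['expr', 'PLUS', 'term']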
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self,str,name,len,func,file,line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return "MiniProduction(%s)" % self.str
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self,p,n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = { }
self.prod.insert(n,".")
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = "%s -> %s" % (self.name," ".join(self.prod))
else:
s = "%s -> <empty>" % self.name
return s
def __repr__(self):
return "LRItem("+str(self)+")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
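# Quick illustration with hypothetical symbols, where the terminals are
# {'PLUS', 'NUMBER'} and 'expr'/'term' are nonterminals:
#
#   rightmost_terminal(['expr', 'PLUS', 'term'], terminals)  -> 'PLUS'
#   rightmost_terminal(['expr', 'term'], terminals)          -> None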
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError): pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = { } # A dictionary of precomputed FIRST(x) symbols
self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = { } # Precedence rules that were actually used by the grammer.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
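# Illustrative use (assuming a Grammar instance g and hypothetical terminals
# PLUS and TIMES):
#
#   g.set_precedence('PLUS',  'left', 1)
#   g.set_precedence('TIMES', 'left', 2)
#
# As asserted above, this must happen before any add_production() call.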
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self,prodname,syms,func=None,file='',line=0):
if prodname in self.Terminals:
raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
if prodname == 'error':
raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
if not _is_identifier.match(prodname):
raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
# Look for literal tokens
for n,s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
if not c in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
if syms[-2] != '%prec':
raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
precname = syms[-1]
prodprec = self.Precedence.get(precname,None)
if not prodprec:
raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
else:
self.UsedPrecedence[precname] = 1
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms,self.Terminals)
prodprec = self.Precedence.get(precname,('right',0))
# See if the rule is already in the rulemap
map = "%s -> %s" % (prodname,syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
"Previous definition at %s:%d" % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if not prodname in self.Nonterminals:
self.Nonterminals[prodname] = [ ]
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if not t in self.Nonterminals:
self.Nonterminals[t] = [ ]
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber,prodname,syms,prodprec,func,file,line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [ p ]
return 0
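# Illustrative use (hypothetical rule and file/line values; in normal operation
# this is driven by the p_* functions collected by yacc()):
#
#   g.add_production('expr', ['expr', 'PLUS', 'term'],
#                    func='p_expr_plus', file='calc.py', line=42)
#
# After the call the rule appears in g.Productions, g.Prodnames['expr'] and
# g.Prodmap.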
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self,start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError("start symbol %s undefined" % start)
self.Productions[0] = Production(0,"S'",[start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if reachable[s]:
# We've already reached symbol s.
return
reachable[s] = 1
for p in self.Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r)
reachable = { }
for s in list(self.Terminals) + list(self.Nonterminals):
reachable[s] = 0
mark_reachable_from( self.Productions[0].prod[0] )
return [s for s in list(self.Nonterminals)
if not reachable[s]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = 1
terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s,term) in terminates.items():
if not term:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p: continue
for s in p.prod:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
result.append((s,p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s,v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s,v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname,self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append( f )
some_change = 1
if not some_change:
break
return self.First
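# Example of what compute_first() produces for a tiny hypothetical grammar:
#
#   expr : expr PLUS term | term
#   term : NUMBER
#
# Illustratively, First['expr'] == First['term'] == ['NUMBER'], and for the
# terminal itself First['PLUS'] == ['PLUS'].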
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = [ ]
if not start:
start = self.Productions[1].name
self.Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in self.Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if not didadd: break
return self.Follow
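# For the same tiny hypothetical grammar (start symbol 'expr'), compute_follow()
# gives, illustratively, Follow['expr'] and Follow['term'] both containing
# '$end' and 'PLUS'.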
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while 1:
if i > len(p):
lri = None
else:
lri = LRItem(p,i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError,KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri: break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError): pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self,module):
if isinstance(module,types.ModuleType):
parsetab = module
else:
if sys.version_info[0] < 3:
exec("import %s as parsetab" % module)
else:
env = { }
exec("import %s as parsetab" % module, env, env)
parsetab = env['parsetab']
if parsetab._tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self,filename):
try:
import cPickle as pickle
except ImportError:
import pickle
in_f = open(filename,"rb")
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self,pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
N = { }
for x in X:
N[x] = 0
stack = []
F = { }
for x in X:
if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
return F
def traverse(x,N,stack,F,X,R,FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y,N,stack,F,X,R,FP)
N[x] = min(N[x],N[y])
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
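# Minimal sketch of how these are used elsewhere in this module (see
# compute_read_sets() and compute_follow_sets()): X is a list of nonterminal
# transitions, R maps a transition to its related transitions, and FP gives the
# initial set for each transition.
#
#   F = digraph(X, R=lambda x: related(x), FP=lambda x: initial_set(x))
#
# Here 'related' and 'initial_set' are hypothetical names standing in for
# reads_relation()/dr_relation() or the includes/read sets.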
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self,grammar,method='LALR',log=None):
if method not in ['SLR','LALR']:
raise LALRError("Unsupported method %s" % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self,I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = 1
while didadd:
didadd = 0
for j in J:
for x in j.lr_after:
if getattr(x,"lr0_added",0) == self._add_count: continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = 1
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I),x),None)
if g: return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x,None)
if not s:
s = { }
self.lr_goto_cache[x] = s
gs = [ ]
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n),None)
if not s1:
s1 = { }
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end',None)
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I),x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I,x)
if not g: continue
if id(g) in self.lr0_cidhash: continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self,C,trans,nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state,N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N: continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j,t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals: break # No forget it
if not p.prod[li] in nullable: break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j,t))
g = self.lr0_goto(C[j],t) # Go to next set
j = self.lr0_cidhash.get(id(g),-1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name: continue
if r.len != p.len: continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]: break
i = i + 1
else:
lookb.append((j,r))
for i in includes:
if not i in includedict: includedict[i] = []
includedict[i].append((state,N))
lookdict[(state,N)] = lookb
return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self,C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = { } # Action production array (temporary)
log.info("Parsing method: %s", self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [ ] # List of actions
st_action = { }
st_actionp = { }
st_goto = { }
log.info("")
log.info("state %d", st)
log.info("")
for p in I:
log.info(" (%d) %s", p.number, str(p))
log.info("")
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action["$end"] = 0
st_actionp["$end"] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec,slevel = Productions[st_actionp[a].number].prec
rprec,rlevel = Precedence.get(a,('right',0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp,rejectp = pp,oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp,rejectp = oldp,pp
self.rr_conflicts.append((st,chosenp,rejectp))
log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I,a)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
# We are in a shift state
actlist.append((a,p,"shift and go to state %d" % j))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError("Shift/shift conflict in state %d" % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec,rlevel = Productions[st_actionp[a].number].prec
sprec,slevel = Precedence.get(a,('right',0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = { }
for a,p,m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(" %-15s %s",a,m)
_actprint[(a,m)] = 1
log.info("")
# Print the actions that were not used. (debugging)
not_used = 0
for a,p,m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a,m) in _actprint:
log.debug(" ! %-15s [ %s ]",a,m)
not_used = 1
_actprint[(a,m)] = 1
if not_used:
log.debug("")
# Construct the goto table for this state
nkeys = { }
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I,n)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
st_goto[n] = j
log.info(" %-30s shift and go to state %d",n,j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in self.lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in self.lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in self.lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in self.lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in self.lr_productions:
if p.func:
f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
else:
f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
f.write("]\n")
f.close()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to create '%s'\n" % filename)
sys.stderr.write(str(e)+"\n")
return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p: continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
grammar.append((file,dline,prodname,syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
return grammar
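# Illustrative input/output (hypothetical file/line values): a docstring such as
#
#   expr : expr PLUS term
#        | term
#
# parsed with parse_grammar(doc, 'calc.py', 10) yields roughly
#   [('calc.py', 11, 'expr', ['expr', 'PLUS', 'term']),
#    ('calc.py', 12, 'expr', ['term'])]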
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self,pdict,log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.files = {}
self.grammar = []
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_files()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(" ".join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError,ValueError):
pass
return sig.digest()
# -----------------------------------------------------------------------------
# validate_file()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_files(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for filename in self.files.keys():
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
continue
counthash = { }
for linen,l in enumerate(lines):
linen += 1
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start,str):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func,types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = 1
return
eline = func_code(self.error_func).co_firstlineno
efile = func_code(self.error_func).co_filename
self.files[efile] = 1
if (func_code(self.error_func).co_argcount != 1+ismethod):
self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
self.error = 1
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = 1
return
terminals = {}
for n in self.tokens:
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get("precedence",None)
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec,(list,tuple)):
self.log.error("precedence must be a list or tuple")
self.error = 1
return
for level,p in enumerate(self.prec):
if not isinstance(p,(list,tuple)):
self.log.error("Bad precedence table")
self.error = 1
return
if len(p) < 2:
self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
self.error = 1
return
assoc = p[0]
if not isinstance(assoc,str):
self.log.error("precedence associativity must be a string")
self.error = 1
return
for term in p[1:]:
if not isinstance(term,str):
self.log.error("precedence items must be strings")
self.error = 1
return
preclist.append((term,assoc,level+1))
self.preclist = preclist
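# A typical precedence declaration that validate_precedence() accepts
# (illustrative, standard PLY usage; token names are hypothetical):
#
#   precedence = (
#       ('left',  'PLUS', 'MINUS'),
#       ('left',  'TIMES', 'DIVIDE'),
#       ('right', 'UMINUS'),
#   )
#
# which becomes preclist entries such as ('PLUS', 'left', 1), ('TIMES', 'left', 2), ...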
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if name[:2] != 'p_': continue
if name == 'p_error': continue
if isinstance(item,(types.FunctionType,types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line,file,name,item.__doc__))
# Sort all of the actions by line number
p_functions.sort()
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error("no rules of the form p_rulename are defined")
self.error = 1
return
for line, file, name, doc in self.pfuncs:
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func_code(func).co_argcount > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
self.error = 1
elif func_code(func).co_argcount < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
self.error = 1
elif not func.__doc__:
self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
else:
try:
parsed_g = parse_grammar(doc,file,line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError:
e = sys.exc_info()[1]
self.log.error(str(e))
self.error = 1
# Looks like a valid grammar rule
# Mark the file in which defined.
self.files[file] = 1
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n,v in self.pdict.items():
if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_' and n != 'p_error':
self.log.warning("'%s' not defined as a function", n)
if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
(isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
func_code(v).co_filename, func_code(v).co_firstlineno,n)
except Exception:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
debuglog=None, errorlog = None, picklefile=None):
global parse # Reference to the parsing method of the last built parser
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
pdict = dict(_items)
else:
pdict = get_caller_module_dict(2)
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict,log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError("Unable to build parser")
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
except Exception:
e = sys.exc_info()[1]
errorlog.warning("There was a problem loading the table file: %s", repr(e))
except VersionError:
e = sys.exc_info()
errorlog.warning(str(e))
except Exception:
pass
if debuglog is None:
if debug:
debuglog = PlyLogger(open(debugfile,"w"))
else:
debuglog = NullLogger()
debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
errors = 0
# Validate the parser information
if pinfo.validate_all():
raise YaccError("Unable to build parser")
if not pinfo.error_func:
errorlog.warning("no p_error() function is defined")
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term,assoc,level)
except GrammarError:
e = sys.exc_info()[1]
errorlog.warning("%s",str(e))
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname,syms,funcname,file,line)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error("%s",str(e))
errors = 1
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error(str(e))
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
errors = 1
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info("")
debuglog.info("Unused terminals:")
debuglog.info("")
for term in unused_terminals:
errorlog.warning("Token '%s' defined, but not used", term)
debuglog.info(" %s", term)
# Print out all productions to the debug log
if debug:
debuglog.info("")
debuglog.info("Grammar")
debuglog.info("")
for n,p in enumerate(grammar.Productions):
debuglog.info("Rule %-5d %s", n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning("There is 1 unused token")
if len(unused_terminals) > 1:
errorlog.warning("There are %d unused tokens", len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning("There is 1 unused rule")
if len(unused_rules) > 1:
errorlog.warning("There are %d unused rules", len(unused_rules))
if debug:
debuglog.info("")
debuglog.info("Terminals, with rules where they appear")
debuglog.info("")
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
debuglog.info("")
debuglog.info("Nonterminals, with rules where they appear")
debuglog.info("")
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info("")
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning("Symbol '%s' is unreachable",u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error("Infinite recursion detected for symbol '%s'", inf)
errors = 1
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug("Generating %s tables", method)
lr = LRGeneratedTable(grammar,method,debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning("1 shift/reduce conflict")
elif num_sr > 1:
errorlog.warning("%d shift/reduce conflicts", num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning("1 reduce/reduce conflict")
elif num_rr > 1:
errorlog.warning("%d reduce/reduce conflicts", num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning("")
debuglog.warning("Conflicts:")
debuglog.warning("")
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
already_reported = {}
for state, rule, rejected in lr.rr_conflicts:
if (state,id(rule),id(rejected)) in already_reported:
continue
debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
debuglog.warning("rejected rule (%s) in state %d", rejected,state)
errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
errorlog.warning("rejected rule (%s) in state %d", rejected, state)
already_reported[state,id(rule),id(rejected)] = 1
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning("Rule (%s) is never reduced", rejected)
errorlog.warning("Rule (%s) is never reduced", rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
lr.write_table(tabmodule,outputdir,signature)
# Write a pickled version of the tables
if picklefile:
lr.pickle_table(picklefile,signature)
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
| bsd-3-clause | 5,688,214,284,204,307,000 | 38.222222 | 176 | 0.464472 | false |
09zwcbupt/undergrad_thesis | ext/poxdesk/qx/tool/pylib/graph/algorithms/heuristics/Euclidean.py | 4 | 3477 | # Copyright (c) 2008-2009 Pedro Matiello <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
A* heuristic for euclidean graphs.
"""
# Imports
import warnings
class euclidean(object):
"""
A* heuristic for Euclidean graphs.
This heuristic has three requirements:
1. All nodes should have the attribute 'position';
2. The weight of all edges should be the euclidean distance between the nodes it links;
3. The C{optimize()} method should be called before the heuristic search.
A small example for clarification:
>>> g = graph.graph()
>>> g.add_nodes(['A','B','C'])
>>> g.add_node_attribute('A', ('position',(0,0)))
>>> g.add_node_attribute('B', ('position',(1,1)))
>>> g.add_node_attribute('C', ('position',(0,2)))
>>> g.add_edge('A','B', wt=2)
>>> g.add_edge('B','C', wt=2)
>>> g.add_edge('A','C', wt=4)
>>> h = graph.heuristics.euclidean()
>>> h.optimize(g)
>>> g.heuristic_search('A', 'C', h)
"""
def __init__(self):
"""
Initialize the heuristic object.
"""
self.distances = {}
def optimize(self, graph):
"""
        Build a dictionary mapping each pair of nodes to a number (the squared euclidean distance between them, computed from their 'position' attributes).
@type graph: graph
@param graph: Graph.
"""
for start in graph.nodes():
for end in graph.nodes():
for each in graph.get_node_attributes(start):
if (each[0] == 'position'):
start_attr = each[1]
break
for each in graph.get_node_attributes(end):
if (each[0] == 'position'):
end_attr = each[1]
break
dist = 0
for i in xrange(len(start_attr)):
dist = dist + (float(start_attr[i]) - float(end_attr[i]))**2
self.distances[(start,end)] = dist
def __call__(self, start, end):
"""
Estimate how far start is from end.
@type start: node
@param start: Start node.
@type end: node
@param end: End node.
"""
assert len(self.distances.keys()) > 0, "You need to optimize this heuristic for your graph before it can be used to estimate."
return self.distances[(start,end)] | gpl-3.0 | -7,340,203,254,417,700,000 | 34.489796 | 134 | 0.59304 | false |
MichaelTong/cassandra-rapid | doc/source/_util/cql.py | 64 | 6877 | # -*- coding: utf-8 -*-
"""
CQL pygments lexer
~~~~~~~~~~~~~~~~~~
Lexer for the Cassandra Query Language (CQL).
This is heavily inspired by the pygments SQL lexer (and the Postgres one in particular) but adapted to CQL
keywords and specificities.
TODO: This has been hacked quickly, but once it's more tested, we could submit it upstream.
In particular, we have a lot of keywords whose meaning depends on the context, and we could potentially improve
their handling. For instance, SET is a keyword but also a type name (that's why we currently also consider
map and list as keywords, not types; we could disambiguate by checking whether a '<' follows). Or things
like USERS, which is used in some documentation examples as a table name but is a keyword too (we could
only consider it a keyword when it follows LIST, for instance). Similarly, type names are not reserved, so they
are sometimes used as column identifiers (also, timestamp is both a type and a keyword). I "think" we can
somewhat disambiguate through "states", but it is unclear how far that is worth going.
We could also add the predefined functions?
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words
from pygments.token import Punctuation, Whitespace, Error, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic, Literal
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.util import iteritems
__all__ = [ 'CQLLexer' ]
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
KEYWORDS = (
'SELECT',
'FROM',
'AS',
'WHERE',
'AND',
'KEY',
'KEYS',
'ENTRIES',
'FULL',
'INSERT',
'UPDATE',
'WITH',
'LIMIT',
'PER',
'PARTITION',
'USING',
'USE',
'DISTINCT',
'COUNT',
'SET',
'BEGIN',
'UNLOGGED',
'BATCH',
'APPLY',
'TRUNCATE',
'DELETE',
'IN',
'CREATE',
'KEYSPACE',
'SCHEMA',
'KEYSPACES',
'COLUMNFAMILY',
'TABLE',
'MATERIALIZED',
'VIEW',
'INDEX',
'CUSTOM',
'ON',
'TO',
'DROP',
'PRIMARY',
'INTO',
'VALUES',
'TIMESTAMP',
'TTL',
'CAST',
'ALTER',
'RENAME',
'ADD',
'TYPE',
'COMPACT',
'STORAGE',
'ORDER',
'BY',
'ASC',
'DESC',
'ALLOW',
'FILTERING',
'IF',
'IS',
'CONTAINS',
'GRANT',
'ALL',
'PERMISSION',
'PERMISSIONS',
'OF',
'REVOKE',
'MODIFY',
'AUTHORIZE',
'DESCRIBE',
'EXECUTE',
'NORECURSIVE',
'MBEAN',
'MBEANS',
'USER',
'USERS',
'ROLE',
'ROLES',
'SUPERUSER',
'NOSUPERUSER',
'PASSWORD',
'LOGIN',
'NOLOGIN',
'OPTIONS',
'CLUSTERING',
'TOKEN',
'WRITETIME',
'NULL',
'NOT',
'EXISTS',
'MAP',
'LIST',
'NAN',
'INFINITY',
'TUPLE',
'TRIGGER',
'STATIC',
'FROZEN',
'FUNCTION',
'FUNCTIONS',
'AGGREGATE',
'SFUNC',
'STYPE',
'FINALFUNC',
'INITCOND',
'RETURNS',
'CALLED',
'INPUT',
'LANGUAGE',
'OR',
'REPLACE',
'JSON',
'LIKE',
)
DATATYPES = (
'ASCII',
'BIGINT',
'BLOB',
'BOOLEAN',
'COUNTER',
'DATE',
'DECIMAL',
'DOUBLE',
'EMPTY',
'FLOAT',
'INET',
'INT',
'SMALLINT',
'TEXT',
'TIME',
'TIMESTAMP',
'TIMEUUID',
'TINYINT',
'UUID',
'VARCHAR',
'VARINT',
)
def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
    The lexer is chosen by looking for a nearby LANGUAGE clause, or assumed
    to be java if no LANGUAGE has been found.
"""
l = None
m = language_re.match(lexer.text[max(0, match.start()-100):match.start()])
if m is not None:
l = lexer._get_lexer(m.group(1))
else:
l = lexer._get_lexer('java')
# 1 = $, 2 = delimiter, 3 = $
yield (match.start(1), String, match.group(1))
yield (match.start(2), String.Delimiter, match.group(2))
yield (match.start(3), String, match.group(3))
# 4 = string contents
if l:
for x in l.get_tokens_unprocessed(match.group(4)):
yield x
else:
yield (match.start(4), String, match.group(4))
# 5 = $, 6 = delimiter, 7 = $
yield (match.start(5), String, match.group(5))
yield (match.start(6), String.Delimiter, match.group(6))
yield (match.start(7), String, match.group(7))
class CQLLexer(RegexLexer):
"""
Lexer for the Cassandra Query Language.
"""
name = 'Cassandra Query Language'
aliases = ['cql']
filenames = ['*.cql']
mimetypes = ['text/x-cql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*\n?', Comment.Single),
(r'//.*\n?', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(' + '|'.join(s.replace(" ", "\s+")
for s in DATATYPES)
+ r')\b', Name.Builtin),
(words(KEYWORDS, suffix=r'\b'), Keyword),
(r'[+*/<>=~!@#%^&|`?-]+', Operator),
(r'\$\d+', Name.Variable),
            # Using Number instead of the more accurate Literal because the latter doesn't seem to be highlighted
            # in most styles.
(r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', Number), # UUIDs
(r'0x[0-9a-fA-F]+', Number), # Blobs
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+', Number.Integer),
(r"((?:E|U&)?)(')", bygroups(String.Affix, String.Single), 'string'),
# quoted identifier
(r'((?:U&)?)(")', bygroups(String.Affix, String.Name), 'quoted-ident'),
(r'(?s)(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', language_callback),
(r'[a-z_]\w*', Name),
(r'[;:()\[\]{},.]', Punctuation),
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
],
'string': [
(r"[^']+", String.Single),
(r"''", String.Single),
(r"'", String.Single, '#pop'),
],
'quoted-ident': [
(r'[^"]+', String.Name),
(r'""', String.Name),
(r'"', String.Name, '#pop'),
],
}
def get_tokens_unprocessed(self, text, *args):
# Have a copy of the entire text to be used by `language_callback`.
self.text = text
for x in RegexLexer.get_tokens_unprocessed(self, text, *args):
yield x
def _get_lexer(self, lang):
return get_lexer_by_name(lang, **self.options)
| apache-2.0 | -9,124,501,696,933,517,000 | 24.756554 | 120 | 0.522612 | false |
code4futuredotorg/reeborg_tw | src/libraries/brython_old/Lib/unittest/test/testmock/testcallable.py | 739 | 4234 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import unittest
from unittest.test.testmock.support import is_instance, X, SomeClass
from unittest.mock import (
Mock, MagicMock, NonCallableMagicMock,
NonCallableMock, patch, create_autospec,
CallableMixin
)
class TestCallable(unittest.TestCase):
def assertNotCallable(self, mock):
self.assertTrue(is_instance(mock, NonCallableMagicMock))
self.assertFalse(is_instance(mock, CallableMixin))
def test_non_callable(self):
for mock in NonCallableMagicMock(), NonCallableMock():
self.assertRaises(TypeError, mock)
self.assertFalse(hasattr(mock, '__call__'))
self.assertIn(mock.__class__.__name__, repr(mock))
    def test_hierarchy(self):
self.assertTrue(issubclass(MagicMock, Mock))
self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock))
def test_attributes(self):
one = NonCallableMock()
self.assertTrue(issubclass(type(one.one), Mock))
two = NonCallableMagicMock()
self.assertTrue(issubclass(type(two.two), MagicMock))
def test_subclasses(self):
class MockSub(Mock):
pass
one = MockSub()
self.assertTrue(issubclass(type(one.one), MockSub))
class MagicSub(MagicMock):
pass
two = MagicSub()
self.assertTrue(issubclass(type(two.two), MagicSub))
def test_patch_spec(self):
patcher = patch('%s.X' % __name__, spec=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertNotCallable(instance)
self.assertRaises(TypeError, instance)
def test_patch_spec_set(self):
patcher = patch('%s.X' % __name__, spec_set=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertNotCallable(instance)
self.assertRaises(TypeError, instance)
def test_patch_spec_instance(self):
patcher = patch('%s.X' % __name__, spec=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertNotCallable(mock)
self.assertRaises(TypeError, mock)
def test_patch_spec_set_instance(self):
patcher = patch('%s.X' % __name__, spec_set=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertNotCallable(mock)
self.assertRaises(TypeError, mock)
def test_patch_spec_callable_class(self):
class CallableX(X):
def __call__(self):
pass
class Sub(CallableX):
pass
class Multi(SomeClass, Sub):
pass
for arg in 'spec', 'spec_set':
for Klass in CallableX, Sub, Multi:
with patch('%s.X' % __name__, **{arg: Klass}) as mock:
instance = mock()
mock.assert_called_once_with()
self.assertTrue(is_instance(instance, MagicMock))
# inherited spec
self.assertRaises(AttributeError, getattr, instance,
'foobarbaz')
result = instance()
# instance is callable, result has no spec
instance.assert_called_once_with()
result(3, 2, 1)
result.assert_called_once_with(3, 2, 1)
result.foo(3, 2, 1)
result.foo.assert_called_once_with(3, 2, 1)
    def test_create_autospec(self):
mock = create_autospec(X)
instance = mock()
self.assertRaises(TypeError, instance)
mock = create_autospec(X())
self.assertRaises(TypeError, mock)
def test_create_autospec_instance(self):
mock = create_autospec(SomeClass, instance=True)
self.assertRaises(TypeError, mock)
mock.wibble()
mock.wibble.assert_called_once_with()
self.assertRaises(TypeError, mock.wibble, 'some', 'args')
| agpl-3.0 | 6,419,948,093,175,247,000 | 27.802721 | 74 | 0.590458 | false |
zerlgi/zcswebapp | zcswebapp-1.0/lib/scudcloud.py | 1 | 14669 | #!/usr/bin/env python3
import sys, os
from cookiejar import PersistentCookieJar
from leftpane import LeftPane
from notifier import Notifier
from resources import Resources
from systray import Systray
from wrapper import Wrapper
from os.path import expanduser
from PyQt4 import QtCore, QtGui, QtWebKit
from PyQt4.Qt import QApplication, QKeySequence
from PyQt4.QtCore import QUrl, QSettings
from PyQt4.QtWebKit import QWebSettings
# Auto-detection of Unity and Dbusmenu in gi repository
try:
from gi.repository import Unity, Dbusmenu
except ImportError:
Unity = None
Dbusmenu = None
from launcher import DummyLauncher
class zcswebapp(QtGui.QMainWindow):
plugins = True
debug = False
forceClose = False
messages = 0
def __init__(self, parent = None, settings_path = ""):
super(zcswebapp, self).__init__(parent)
self.setWindowTitle('zcswebapp')
self.settings_path = settings_path
self.notifier = Notifier(Resources.APP_NAME, Resources.get_path('zcswebapp.png'))
self.settings = QSettings(self.settings_path + '/zcswebapp.cfg', QSettings.IniFormat)
self.identifier = self.settings.value("Domain")
if Unity is not None:
self.launcher = Unity.LauncherEntry.get_for_desktop_id("zcswebapp.desktop")
else:
self.launcher = DummyLauncher(self)
self.webSettings()
self.leftPane = LeftPane(self)
webView = Wrapper(self)
webView.page().networkAccessManager().setCookieJar(self.cookiesjar)
self.stackedWidget = QtGui.QStackedWidget()
self.stackedWidget.addWidget(webView)
centralWidget = QtGui.QWidget(self)
layout = QtGui.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self.leftPane)
layout.addWidget(self.stackedWidget)
centralWidget.setLayout(layout)
self.setCentralWidget(centralWidget)
self.addMenu()
self.tray = Systray(self)
self.systray(zcswebapp.minimized)
self.installEventFilter(self)
if self.identifier is None:
webView.load(QtCore.QUrl(Resources.SIGNIN_URL))
else:
webView.load(QtCore.QUrl(self.domain()))
webView.show()
def webSettings(self):
self.cookiesjar = PersistentCookieJar(self)
self.zoom = self.readZoom()
# Required by Youtube videos (HTML5 video support only on Qt5)
QWebSettings.globalSettings().setAttribute(QWebSettings.PluginsEnabled, self.plugins)
# We don't want Java
QWebSettings.globalSettings().setAttribute(QWebSettings.JavaEnabled, False)
# We don't need History
QWebSettings.globalSettings().setAttribute(QWebSettings.PrivateBrowsingEnabled, True)
# Required for copy and paste clipboard integration
QWebSettings.globalSettings().setAttribute(QWebSettings.JavascriptCanAccessClipboard, True)
# Enabling Inspeclet only when --debug=True (requires more CPU usage)
QWebSettings.globalSettings().setAttribute(QWebSettings.DeveloperExtrasEnabled, self.debug)
def toggleFullScreen(self):
if self.isFullScreen():
self.showMaximized()
else:
self.showFullScreen()
def restore(self):
geometry = self.settings.value("geometry")
if geometry is not None:
self.restoreGeometry(geometry)
windowState = self.settings.value("windowState")
if windowState is not None:
self.restoreState(windowState)
else:
self.showMaximized()
def systray(self, show=None):
if show is None:
show = self.settings.value("Systray") == "True"
if show:
self.tray.show()
self.menus["file"]["close"].setEnabled(True)
self.settings.setValue("Systray", "True")
else:
self.tray.setVisible(False)
self.menus["file"]["close"].setEnabled(False)
self.settings.setValue("Systray", "False")
def readZoom(self):
default = 1
if self.settings.value("Zoom") is not None:
default = float(self.settings.value("Zoom"))
return default
def setZoom(self, factor=1):
if factor > 0:
for i in range(0, self.stackedWidget.count()):
widget = self.stackedWidget.widget(i)
widget.setZoomFactor(factor)
self.settings.setValue("Zoom", factor)
def zoomIn(self):
self.setZoom(self.current().zoomFactor() + 0.1)
def zoomOut(self):
self.setZoom(self.current().zoomFactor() - 0.1)
def zoomReset(self):
self.setZoom()
def addMenu(self):
self.menus = {
"file": {
"preferences": self.createAction("Preferences", self.current().preferences),
"systray": self.createAction("Close to Tray", self.systray, None, True),
"addTeam": self.createAction("Sign in to Another Team", self.current().addTeam),
"signout": self.createAction("Signout", self.current().logout),
"close": self.createAction("Close", self.close, QKeySequence.Close),
"exit": self.createAction("Quit", self.exit, QKeySequence.Quit)
},
"edit": {
"undo": self.current().pageAction(QtWebKit.QWebPage.Undo),
"redo": self.current().pageAction(QtWebKit.QWebPage.Redo),
"cut": self.current().pageAction(QtWebKit.QWebPage.Cut),
"copy": self.current().pageAction(QtWebKit.QWebPage.Copy),
"paste": self.current().pageAction(QtWebKit.QWebPage.Paste),
"back": self.current().pageAction(QtWebKit.QWebPage.Back),
"forward": self.current().pageAction(QtWebKit.QWebPage.Forward),
"reload": self.current().pageAction(QtWebKit.QWebPage.Reload)
},
"view": {
"zoomin": self.createAction("Zoom In", self.zoomIn, QKeySequence.ZoomIn),
"zoomout": self.createAction("Zoom Out", self.zoomOut, QKeySequence.ZoomOut),
"reset": self.createAction("Reset", self.zoomReset, QtCore.Qt.CTRL + QtCore.Qt.Key_0),
"fullscreen": self.createAction("Toggle Full Screen", self.toggleFullScreen, QtCore.Qt.Key_F11)
},
"help": {
"help": self.createAction("Help and Feedback", self.current().help, QKeySequence.HelpContents),
"center": self.createAction("Slack Help Center", self.current().helpCenter),
"about": self.createAction("About", self.current().about)
}
}
menu = self.menuBar()
fileMenu = menu.addMenu("&File")
fileMenu.addAction(self.menus["file"]["preferences"])
fileMenu.addAction(self.menus["file"]["systray"])
fileMenu.addSeparator()
fileMenu.addAction(self.menus["file"]["addTeam"])
fileMenu.addAction(self.menus["file"]["signout"])
fileMenu.addSeparator()
fileMenu.addAction(self.menus["file"]["close"])
fileMenu.addAction(self.menus["file"]["exit"])
editMenu = menu.addMenu("&Edit")
editMenu.addAction(self.menus["edit"]["undo"])
editMenu.addAction(self.menus["edit"]["redo"])
editMenu.addSeparator()
editMenu.addAction(self.menus["edit"]["cut"])
editMenu.addAction(self.menus["edit"]["copy"])
editMenu.addAction(self.menus["edit"]["paste"])
editMenu.addSeparator()
editMenu.addAction(self.menus["edit"]["back"])
editMenu.addAction(self.menus["edit"]["forward"])
editMenu.addAction(self.menus["edit"]["reload"])
viewMenu = menu.addMenu("&View")
viewMenu.addAction(self.menus["view"]["zoomin"])
viewMenu.addAction(self.menus["view"]["zoomout"])
viewMenu.addAction(self.menus["view"]["reset"])
viewMenu.addSeparator()
viewMenu.addAction(self.menus["view"]["fullscreen"])
helpMenu = menu.addMenu("&Help")
helpMenu.addAction(self.menus["help"]["help"])
helpMenu.addAction(self.menus["help"]["center"])
helpMenu.addSeparator()
helpMenu.addAction(self.menus["help"]["about"])
self.enableMenus(False)
showSystray = self.settings.value("Systray") == "True"
self.menus["file"]["systray"].setChecked(showSystray)
self.menus["file"]["close"].setEnabled(showSystray)
def enableMenus(self, enabled):
self.menus["file"]["preferences"].setEnabled(enabled == True)
self.menus["file"]["addTeam"].setEnabled(enabled == True)
self.menus["file"]["signout"].setEnabled(enabled == True)
self.menus["help"]["help"].setEnabled(enabled == True)
def createAction(self, text, slot, shortcut=None, checkable=False):
action = QtGui.QAction(text, self)
if shortcut is not None:
action.setShortcut(shortcut)
action.triggered.connect(slot)
if checkable:
action.setCheckable(True)
return action
def domain(self):
if self.identifier.endswith(".slack.com"):
return self.identifier
else:
return "https://"+self.identifier+".slack.com"
def current(self):
return self.stackedWidget.currentWidget()
def teams(self, teams):
if teams is not None and len(teams) > 1:
self.leftPane.show()
for t in teams:
try:
self.leftPane.addTeam(t['id'], t['team_name'], t['team_url'], t['team_icon']['image_88'], t == teams[0])
except:
self.leftPane.addTeam(t['id'], t['team_name'], t['team_url'], '', t == teams[0])
def switchTo(self, url):
qUrl = QtCore.QUrl(url)
index = -1
for i in range(0, self.stackedWidget.count()):
if self.stackedWidget.widget(i).url().toString().startswith(url):
index = i
break
if index != -1:
self.stackedWidget.setCurrentIndex(index)
else:
webView = Wrapper(self)
webView.page().networkAccessManager().setCookieJar(self.cookiesjar)
webView.load(qUrl)
webView.show()
self.stackedWidget.addWidget(webView)
self.stackedWidget.setCurrentWidget(webView)
self.quicklist(self.current().listChannels())
self.enableMenus(self.current().isConnected())
# Save the last used team as default
self.settings.setValue("Domain", 'https://'+qUrl.host())
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.ActivationChange and self.isActiveWindow():
self.focusInEvent(event)
if event.type() == QtCore.QEvent.KeyPress:
# Ctrl + <n>
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_1: self.leftPane.click(0)
elif event.key() == QtCore.Qt.Key_2: self.leftPane.click(1)
elif event.key() == QtCore.Qt.Key_3: self.leftPane.click(2)
elif event.key() == QtCore.Qt.Key_4: self.leftPane.click(3)
elif event.key() == QtCore.Qt.Key_5: self.leftPane.click(4)
elif event.key() == QtCore.Qt.Key_6: self.leftPane.click(5)
elif event.key() == QtCore.Qt.Key_7: self.leftPane.click(6)
elif event.key() == QtCore.Qt.Key_8: self.leftPane.click(7)
elif event.key() == QtCore.Qt.Key_9: self.leftPane.click(8)
# Ctrl + Shift + <key>
            if (QtGui.QApplication.keyboardModifiers() & QtCore.Qt.ControlModifier) and (QtGui.QApplication.keyboardModifiers() & QtCore.Qt.ShiftModifier):
if event.key() == QtCore.Qt.Key_V: self.current().createSnippet()
return QtGui.QMainWindow.eventFilter(self, obj, event);
def focusInEvent(self, event):
self.launcher.set_property("urgent", False)
self.tray.stopAlert()
def titleChanged(self):
self.setWindowTitle(self.current().title())
def closeEvent(self, event):
if not self.forceClose and self.settings.value("Systray") == "True":
self.hide()
event.ignore()
else:
self.cookiesjar.save()
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("windowState", self.saveState())
def show(self):
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
self.activateWindow()
self.setVisible(True)
def exit(self):
self.forceClose = True
self.close()
def quicklist(self, channels):
if Dbusmenu is not None:
ql = Dbusmenu.Menuitem.new()
self.launcher.set_property("quicklist", ql)
if channels is not None:
for c in channels:
if c['is_member']:
item = Dbusmenu.Menuitem.new ()
item.property_set (Dbusmenu.MENUITEM_PROP_LABEL, "#"+c['name'])
item.property_set ("id", c['name'])
item.property_set_bool (Dbusmenu.MENUITEM_PROP_VISIBLE, True)
item.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.current().openChannel)
ql.child_append(item)
self.launcher.set_property("quicklist", ql)
def notify(self, title, message):
self.notifier.notify(title, message)
self.alert()
def alert(self):
if not self.isActiveWindow():
self.launcher.set_property("urgent", True)
self.tray.alert()
def count(self):
total = 0
for i in range(0, self.stackedWidget.count()):
widget = self.stackedWidget.widget(i)
if widget.messages == 0:
self.leftPane.stopAlert(widget.team())
else:
self.leftPane.alert(widget.team())
total+=widget.messages
if total > self.messages:
self.alert()
if 0 == total:
self.launcher.set_property("count_visible", False)
self.tray.setCounter(0)
else:
self.tray.setCounter(total)
self.launcher.set_property("count", total)
self.launcher.set_property("count_visible", True)
self.messages = total
| mit | -6,082,477,828,872,237,000 | 42.271386 | 153 | 0.601404 | false |
onceuponatimeforever/oh-mainline | mysite/search/migrations/0020_remove_project_icon_field.py | 17 | 3867 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'Project.icon'
db.delete_column('search_project', 'icon')
def backwards(self, orm):
# Adding field 'Project.icon'
db.add_column('search_project', 'icon', orm['search.project:icon'])
models = {
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.project': {
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['search']
| agpl-3.0 | -4,285,620,888,069,940,000 | 54.242857 | 145 | 0.592449 | false |
Jgarcia-IAS/ReporsitorioVacioOdoo | openerp/addons/base_setup/res_config.py | 261 | 5089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
_name = 'base.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_multi_company': fields.boolean('Manage multiple companies',
help='Work in multi-company environments, with appropriate security access between companies.\n'
'-This installs the module multi_company.'),
'module_share': fields.boolean('Allow documents sharing',
help="""Share or embbed any screen of Odoo."""),
'module_portal': fields.boolean('Activate the customer portal',
help="""Give your customers access to their documents."""),
'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
'module_base_import': fields.boolean("Allow users to import data from CSV files"),
'module_google_drive': fields.boolean('Attach Google documents to any record',
help="""This installs the module google_docs."""),
'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
help="""This installs the module google_calendar."""),
'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
}
_defaults= {
'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
}
def open_company(self, cr, uid, ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
return {
'type': 'ir.actions.act_window',
'name': 'Your Company',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'res.company',
'res_id': user.company_id.id,
'target': 'current',
}
def _change_header(self, header,font):
""" Replace default fontname use in header and setfont tag """
default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font,header)
return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font,default_para)
def set_base_defaults(self, cr, uid, ids, context=None):
ir_model_data = self.pool.get('ir.model.data')
wizard = self.browse(cr, uid, ids, context)[0]
if wizard.font:
user = self.pool.get('res.users').browse(cr, uid, uid, context)
font_name = wizard.font.name
user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
return {}
def act_discover_fonts(self, cr, uid, ids, context=None):
return self.pool.get("res.font").font_scan(cr, uid, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm.
class sale_config_settings(osv.osv_memory):
_name = 'sale.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_web_linkedin': fields.boolean('Get contacts automatically from linkedIn',
help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
'module_crm': fields.boolean('CRM'),
'module_sale' : fields.boolean('SALE'),
'module_mass_mailing': fields.boolean(
'Manage mass mailing campaigns',
help='Get access to statistics with your mass mailing, manage campaigns.'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | -8,962,227,130,952,623,000 | 50.938776 | 283 | 0.614463 | false |
GaussDing/django | tests/template_tests/filter_tests/test_autoescape.py | 513 | 1342 | from django.test import SimpleTestCase
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeStringfilterTests(SimpleTestCase):
"""
Filters decorated with stringfilter still respect is_safe.
"""
@setup({'autoescape-stringfilter01': '{{ unsafe|capfirst }}'})
def test_autoescape_stringfilter01(self):
output = self.engine.render_to_string('autoescape-stringfilter01', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter02': '{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter02(self):
output = self.engine.render_to_string('autoescape-stringfilter02', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter03': '{{ safe|capfirst }}'})
def test_autoescape_stringfilter03(self):
output = self.engine.render_to_string('autoescape-stringfilter03', {'safe': SafeClass()})
self.assertEqual(output, 'You > me')
@setup({'autoescape-stringfilter04': '{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter04(self):
output = self.engine.render_to_string('autoescape-stringfilter04', {'safe': SafeClass()})
self.assertEqual(output, 'You > me')
| bsd-3-clause | 5,072,677,828,539,771,000 | 45.275862 | 105 | 0.683308 | false |
vipul-sharma20/oh-mainline | vendor/packages/kombu/kombu/transport/redis.py | 15 | 33278 | """
kombu.transport.redis
=====================
Redis transport.
"""
from __future__ import absolute_import
import numbers
import socket
from bisect import bisect
from collections import namedtuple
from contextlib import contextmanager
from time import time
from amqp import promise
from anyjson import loads, dumps
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, values, string_t
from kombu.log import get_logger
from kombu.utils import cached_property, uuid
from kombu.utils.eventio import poll, READ, ERR
from kombu.utils.encoding import bytes_to_str
from kombu.utils.url import _parse_url
NO_ROUTE_ERROR = """
Cannot route message for exchange {0!r}: Table empty or key no longer exists.
Probably the key ({1!r}) has been removed from the Redis database.
"""
try:
from billiard.util import register_after_fork
except ImportError: # pragma: no cover
try:
from multiprocessing.util import register_after_fork # noqa
except ImportError:
def register_after_fork(*args, **kwargs): # noqa
pass
try:
import redis
except ImportError: # pragma: no cover
redis = None # noqa
from . import virtual
logger = get_logger('kombu.transport.redis')
crit, warn = logger.critical, logger.warn
DEFAULT_PORT = 6379
DEFAULT_DB = 0
PRIORITY_STEPS = [0, 3, 6, 9]
error_classes_t = namedtuple('error_classes_t', (
'connection_errors', 'channel_errors',
))
# This implementation may seem overly complex, but I assure you there is
# a good reason for doing it this way.
#
# Consuming from several connections enables us to emulate channels,
# which means we can have different service guarantees for individual
# channels.
#
# So we need to consume messages from multiple connections simultaneously,
# and using epoll means we don't have to do so using multiple threads.
#
# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout
# exchanges (broadcast), as an alternative to pushing messages to fanout-bound
# queues manually.
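# A self-contained sketch of the idea above, assuming a Redis server on
# localhost and two made-up queue names; it only illustrates multi-key BRPOP
# and is not part of the transport API.
def _example_multi_key_brpop():
    import redis as _redis  # requires the redis-py package
    client = _redis.Redis(host='localhost', port=DEFAULT_PORT, db=DEFAULT_DB)
    # One blocking call waits on both lists at once and returns a
    # (queue_name, payload) pair for whichever receives data first.
    reply = client.brpop(['example_queue_a', 'example_queue_b'], timeout=1)
    if reply is not None:
        queue, payload = reply
        return queue, payload
    return None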
def get_redis_error_classes():
from redis import exceptions
# This exception suddenly changed name between redis-py versions
if hasattr(exceptions, 'InvalidData'):
DataError = exceptions.InvalidData
else:
DataError = exceptions.DataError
return error_classes_t(
(virtual.Transport.connection_errors + (
InconsistencyError,
socket.error,
IOError,
OSError,
exceptions.ConnectionError,
exceptions.AuthenticationError)),
(virtual.Transport.channel_errors + (
DataError,
exceptions.InvalidResponse,
exceptions.ResponseError)),
)
class MutexHeld(Exception):
pass
@contextmanager
def Mutex(client, name, expire):
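    # Acquire by SETNX on a shared key: only the caller that manages to set the
    # key runs the protected block.  The key is given an expiry so a crashed
    # holder cannot block others forever, and the WATCH/pipeline dance on
    # release deletes the key only if we still own it.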
lock_id = uuid()
i_won = client.setnx(name, lock_id)
try:
if i_won:
client.expire(name, expire)
yield
else:
if not client.ttl(name):
client.expire(name, expire)
raise MutexHeld()
finally:
if i_won:
pipe = client.pipeline(True)
try:
pipe.watch(name)
if pipe.get(name) == lock_id:
pipe.multi()
pipe.delete(name)
pipe.execute()
pipe.unwatch()
except redis.WatchError:
pass
class QoS(virtual.QoS):
restore_at_shutdown = True
def __init__(self, *args, **kwargs):
super(QoS, self).__init__(*args, **kwargs)
self._vrestore_count = 0
def append(self, message, delivery_tag):
delivery = message.delivery_info
EX, RK = delivery['exchange'], delivery['routing_key']
with self.pipe_or_acquire() as pipe:
pipe.zadd(self.unacked_index_key, delivery_tag, time()) \
.hset(self.unacked_key, delivery_tag,
dumps([message._raw, EX, RK])) \
.execute()
super(QoS, self).append(message, delivery_tag)
def restore_unacked(self):
for tag in self._delivered:
self.restore_by_tag(tag)
self._delivered.clear()
def ack(self, delivery_tag):
self._remove_from_indices(delivery_tag).execute()
super(QoS, self).ack(delivery_tag)
def reject(self, delivery_tag, requeue=False):
if requeue:
self.restore_by_tag(delivery_tag, leftmost=True)
self.ack(delivery_tag)
@contextmanager
def pipe_or_acquire(self, pipe=None):
if pipe:
yield pipe
else:
with self.channel.conn_or_acquire() as client:
yield client.pipeline()
def _remove_from_indices(self, delivery_tag, pipe=None):
with self.pipe_or_acquire(pipe) as pipe:
return pipe.zrem(self.unacked_index_key, delivery_tag) \
.hdel(self.unacked_key, delivery_tag)
def restore_visible(self, start=0, num=10, interval=10):
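        # Periodically (every ``interval`` calls) scan the sorted-set index of
        # unacked messages for entries older than the visibility timeout and
        # push them back onto their original queues, holding a Redis-side
        # mutex so that only one client performs the restore at a time.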
self._vrestore_count += 1
if (self._vrestore_count - 1) % interval:
return
with self.channel.conn_or_acquire() as client:
ceil = time() - self.visibility_timeout
try:
with Mutex(client, self.unacked_mutex_key,
self.unacked_mutex_expire):
visible = client.zrevrangebyscore(
self.unacked_index_key, ceil, 0,
start=num and start, num=num, withscores=True)
for tag, score in visible or []:
self.restore_by_tag(tag, client)
except MutexHeld:
pass
def restore_by_tag(self, tag, client=None, leftmost=False):
with self.channel.conn_or_acquire(client) as client:
p, _, _ = self._remove_from_indices(
tag, client.pipeline().hget(self.unacked_key, tag)).execute()
if p:
M, EX, RK = loads(bytes_to_str(p)) # json is unicode
self.channel._do_restore_message(M, EX, RK, client, leftmost)
@cached_property
def unacked_key(self):
return self.channel.unacked_key
@cached_property
def unacked_index_key(self):
return self.channel.unacked_index_key
@cached_property
def unacked_mutex_key(self):
return self.channel.unacked_mutex_key
@cached_property
def unacked_mutex_expire(self):
return self.channel.unacked_mutex_expire
@cached_property
def visibility_timeout(self):
return self.channel.visibility_timeout
class MultiChannelPoller(object):
eventflags = READ | ERR
#: Set by :meth:`get` while reading from the socket.
_in_protected_read = False
#: Set of one-shot callbacks to call after reading from socket.
after_read = None
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map.
self._fd_to_chan = {}
# channel -> socket map
self._chan_to_sock = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
# one-shot callbacks called after reading from socket.
self.after_read = set()
def close(self):
for fd in values(self._chan_to_sock):
try:
self.poller.unregister(fd)
except (KeyError, ValueError):
pass
self._channels.clear()
self._fd_to_chan.clear()
self._chan_to_sock.clear()
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
def _on_connection_disconnect(self, connection):
sock = getattr(connection, '_sock', None)
if sock is not None:
self.poller.unregister(sock)
def _register(self, channel, client, type):
if (channel, client, type) in self._chan_to_sock:
self._unregister(channel, client, type)
if client.connection._sock is None: # not connected yet.
client.connection.connect()
sock = client.connection._sock
self._fd_to_chan[sock.fileno()] = (channel, type)
self._chan_to_sock[(channel, client, type)] = sock
self.poller.register(sock, self.eventflags)
def _unregister(self, channel, client, type):
self.poller.unregister(self._chan_to_sock[(channel, client, type)])
def _register_BRPOP(self, channel):
"""enable BRPOP mode for channel."""
ident = channel, channel.client, 'BRPOP'
if channel.client.connection._sock is None or \
ident not in self._chan_to_sock:
channel._in_poll = False
self._register(*ident)
if not channel._in_poll: # send BRPOP
channel._brpop_start()
def _register_LISTEN(self, channel):
"""enable LISTEN mode for channel."""
if channel.subclient.connection._sock is None:
channel._in_listen = False
self._register(channel, channel.subclient, 'LISTEN')
if not channel._in_listen:
channel._subscribe() # send SUBSCRIBE
def on_poll_start(self):
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
def on_poll_init(self, poller):
self.poller = poller
for channel in self._channels:
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def maybe_restore_messages(self):
for channel in self._channels:
if channel.active_queues:
# only need to do this once, as they are not local to channel.
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def on_readable(self, fileno):
chan, type = self._fd_to_chan[fileno]
if chan.qos.can_consume():
return chan.handlers[type]()
def handle_event(self, fileno, event):
if event & READ:
return self.on_readable(fileno), self
elif event & ERR:
chan, type = self._fd_to_chan[fileno]
chan._poll_error(type)
def get(self, timeout=None):
self._in_protected_read = True
try:
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
events = self.poller.poll(timeout)
for fileno, event in events or []:
ret = self.handle_event(fileno, event)
if ret:
return ret
# - no new data, so try to restore messages.
# - reset active redis commands.
self.maybe_restore_messages()
raise Empty()
finally:
self._in_protected_read = False
while self.after_read:
try:
fun = self.after_read.pop()
except KeyError:
break
else:
fun()
@property
def fds(self):
return self._fd_to_chan
class Channel(virtual.Channel):
QoS = QoS
_client = None
_subclient = None
supports_fanout = True
keyprefix_queue = '_kombu.binding.%s'
keyprefix_fanout = '/{db}.'
sep = '\x06\x16'
_in_poll = False
_in_listen = False
_fanout_queues = {}
ack_emulation = True
unacked_key = 'unacked'
unacked_index_key = 'unacked_index'
unacked_mutex_key = 'unacked_mutex'
unacked_mutex_expire = 300 # 5 minutes
unacked_restore_limit = None
visibility_timeout = 3600 # 1 hour
priority_steps = PRIORITY_STEPS
socket_timeout = None
max_connections = 10
    #: Transport option to enable/disable the fanout keyprefix.
    #: Should be enabled by default, but that is not
    #: backwards compatible. Can also be a string, in which
    #: case it changes the default prefix ('/{db}.') to something
    #: else. The prefix must include a leading slash and a trailing dot.
fanout_prefix = False
#: If enabled the fanout exchange will support patterns in routing
#: and binding keys (like a topic exchange but using PUB/SUB).
#: This will be enabled by default in a future version.
fanout_patterns = False
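    #: Both flags are normally supplied through ``transport_options`` when the
    #: connection is created -- a hedged sketch (URL and option values assumed):
    #:
    #:     Connection('redis://localhost:6379/0', transport_options={
    #:         'fanout_prefix': True,
    #:         'fanout_patterns': True,
    #:     })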
_pool = None
from_transport_options = (
virtual.Channel.from_transport_options +
('ack_emulation',
'unacked_key',
'unacked_index_key',
'unacked_mutex_key',
'unacked_mutex_expire',
'visibility_timeout',
'unacked_restore_limit',
'fanout_prefix',
'fanout_patterns',
'socket_timeout',
'max_connections',
'priority_steps') # <-- do not add comma here!
)
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
if not self.ack_emulation: # disable visibility timeout
self.QoS = virtual.QoS
self._queue_cycle = []
self.Client = self._get_client()
self.ResponseError = self._get_response_error()
self.active_fanout_queues = set()
self.auto_delete_queues = set()
self._fanout_to_queue = {}
self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}
if self.fanout_prefix:
if isinstance(self.fanout_prefix, string_t):
self.keyprefix_fanout = self.fanout_prefix
else:
# previous versions did not set a fanout, so cannot enable
# by default.
self.keyprefix_fanout = ''
# Evaluate connection.
try:
self.client.info()
except Exception:
if self._pool:
self._pool.disconnect()
raise
self.connection.cycle.add(self) # add to channel poller.
        # copy errors, in case the channel is closed but threads are
        # still waiting for data.
self.connection_errors = self.connection.connection_errors
register_after_fork(self, self._after_fork)
def _after_fork(self):
if self._pool is not None:
self._pool.disconnect()
def _on_connection_disconnect(self, connection):
if self.connection and self.connection.cycle:
self.connection.cycle._on_connection_disconnect(connection)
def _do_restore_message(self, payload, exchange, routing_key,
client=None, leftmost=False):
with self.conn_or_acquire(client) as client:
try:
try:
payload['headers']['redelivered'] = True
except KeyError:
pass
for queue in self._lookup(exchange, routing_key):
(client.lpush if leftmost else client.rpush)(
queue, dumps(payload),
)
except Exception:
crit('Could not restore message: %r', payload, exc_info=True)
def _restore(self, message, leftmost=False):
if not self.ack_emulation:
return super(Channel, self)._restore(message)
tag = message.delivery_tag
with self.conn_or_acquire() as client:
P, _ = client.pipeline() \
.hget(self.unacked_key, tag) \
.hdel(self.unacked_key, tag) \
.execute()
if P:
M, EX, RK = loads(bytes_to_str(P)) # json is unicode
self._do_restore_message(M, EX, RK, client, leftmost)
def _restore_at_beginning(self, message):
return self._restore(message, leftmost=True)
def basic_consume(self, queue, *args, **kwargs):
if queue in self._fanout_queues:
exchange, _ = self._fanout_queues[queue]
self.active_fanout_queues.add(queue)
self._fanout_to_queue[exchange] = queue
ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
self._update_cycle()
return ret
def basic_cancel(self, consumer_tag):
# If we are busy reading messages we may experience
# a race condition where a message is consumed after
# cancelling, so we must delay this operation until reading
# is complete (Issue celery/celery#1773).
connection = self.connection
if connection:
if connection.cycle._in_protected_read:
return connection.cycle.after_read.add(
promise(self._basic_cancel, (consumer_tag, )),
)
return self._basic_cancel(consumer_tag)
def _basic_cancel(self, consumer_tag):
try:
queue = self._tag_to_queue[consumer_tag]
except KeyError:
return
try:
self.active_fanout_queues.remove(queue)
except KeyError:
pass
else:
self._unsubscribe_from(queue)
try:
exchange, _ = self._fanout_queues[queue]
self._fanout_to_queue.pop(exchange)
except KeyError:
pass
ret = super(Channel, self).basic_cancel(consumer_tag)
self._update_cycle()
return ret
def _get_publish_topic(self, exchange, routing_key):
if routing_key and self.fanout_patterns:
return ''.join([self.keyprefix_fanout, exchange, '/', routing_key])
return ''.join([self.keyprefix_fanout, exchange])
def _get_subscribe_topic(self, queue):
exchange, routing_key = self._fanout_queues[queue]
return self._get_publish_topic(exchange, routing_key)
def _subscribe(self):
keys = [self._get_subscribe_topic(queue)
for queue in self.active_fanout_queues]
if not keys:
return
c = self.subclient
if c.connection._sock is None:
c.connection.connect()
self._in_listen = True
c.psubscribe(keys)
def _unsubscribe_from(self, queue):
topic = self._get_subscribe_topic(queue)
c = self.subclient
should_disconnect = False
if c.connection._sock is None:
c.connection.connect()
should_disconnect = True
try:
c.unsubscribe([topic])
finally:
if should_disconnect and c.connection:
c.connection.disconnect()
def _handle_message(self, client, r):
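        # A raw pub/sub reply ``r`` is a list whose first element names the
        # event: ('unsubscribe', channel, remaining_count) when a subscription
        # is dropped, ('message', channel, data) for plain SUBSCRIBE traffic,
        # and ('pmessage', pattern, channel, data) for PSUBSCRIBE traffic.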
if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0:
client.subscribed = False
elif bytes_to_str(r[0]) == 'pmessage':
return {'type': r[0], 'pattern': r[1],
'channel': r[2], 'data': r[3]}
else:
return {'type': r[0], 'pattern': None,
'channel': r[1], 'data': r[2]}
def _receive(self):
c = self.subclient
response = None
try:
response = c.parse_response()
except self.connection_errors:
self._in_listen = False
raise Empty()
if response is not None:
payload = self._handle_message(c, response)
if bytes_to_str(payload['type']).endswith('message'):
channel = bytes_to_str(payload['channel'])
if payload['data']:
if channel[0] == '/':
_, _, channel = channel.partition('.')
try:
message = loads(bytes_to_str(payload['data']))
except (TypeError, ValueError):
warn('Cannot process event on channel %r: %s',
channel, repr(payload)[:4096], exc_info=1)
raise Empty()
exchange = channel.split('/', 1)[0]
return message, self._fanout_to_queue[exchange]
raise Empty()
def _brpop_start(self, timeout=1):
queues = self._consume_cycle()
if not queues:
return
keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS
for queue in queues] + [timeout or 0]
self._in_poll = True
self.client.connection.send_command('BRPOP', *keys)
def _brpop_read(self, **options):
try:
try:
dest__item = self.client.parse_response(self.client.connection,
'BRPOP',
**options)
except self.connection_errors:
# if there's a ConnectionError, disconnect so the next
# iteration will reconnect automatically.
self.client.connection.disconnect()
raise Empty()
if dest__item:
dest, item = dest__item
dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
self._rotate_cycle(dest)
return loads(bytes_to_str(item)), dest
else:
raise Empty()
finally:
self._in_poll = False
def _poll_error(self, type, **options):
if type == 'LISTEN':
self.subclient.parse_response()
else:
self.client.parse_response(self.client.connection, type)
def _get(self, queue):
with self.conn_or_acquire() as client:
for pri in PRIORITY_STEPS:
item = client.rpop(self._q_for_pri(queue, pri))
if item:
return loads(bytes_to_str(item))
raise Empty()
def _size(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.llen(self._q_for_pri(queue, pri))
sizes = cmds.execute()
return sum(size for size in sizes
if isinstance(size, numbers.Integral))
def _q_for_pri(self, queue, pri):
pri = self.priority(pri)
return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', ''))
def priority(self, n):
steps = self.priority_steps
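        # bisect() locates where n would slot into the steps, so with the
        # default [0, 3, 6, 9] priorities 0-2 collapse to 0, 3-5 to 3,
        # 6-8 to 6, and anything from 9 up to 9.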
return steps[bisect(steps, n) - 1]
def _put(self, queue, message, **kwargs):
"""Deliver message."""
try:
pri = max(min(int(
message['properties']['delivery_info']['priority']), 9), 0)
except (TypeError, ValueError, KeyError):
pri = 0
with self.conn_or_acquire() as client:
client.lpush(self._q_for_pri(queue, pri), dumps(message))
def _put_fanout(self, exchange, message, routing_key, **kwargs):
"""Deliver fanout message."""
with self.conn_or_acquire() as client:
client.publish(
self._get_publish_topic(exchange, routing_key),
dumps(message),
)
def _new_queue(self, queue, auto_delete=False, **kwargs):
if auto_delete:
self.auto_delete_queues.add(queue)
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
# Mark exchange as fanout.
self._fanout_queues[queue] = (
exchange, routing_key.replace('#', '*'),
)
with self.conn_or_acquire() as client:
client.sadd(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
def _delete(self, queue, exchange, routing_key, pattern, *args):
self.auto_delete_queues.discard(queue)
with self.conn_or_acquire() as client:
client.srem(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.delete(self._q_for_pri(queue, pri))
cmds.execute()
def _has_queue(self, queue, **kwargs):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.exists(self._q_for_pri(queue, pri))
return any(cmds.execute())
def get_table(self, exchange):
key = self.keyprefix_queue % exchange
with self.conn_or_acquire() as client:
values = client.smembers(key)
if not values:
raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key))
return [tuple(bytes_to_str(val).split(self.sep)) for val in values]
def _purge(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
priq = self._q_for_pri(queue, pri)
cmds = cmds.llen(priq).delete(priq)
sizes = cmds.execute()
return sum(sizes[::2])
def close(self):
if self._pool:
self._pool.disconnect()
if not self.closed:
# remove from channel poller.
self.connection.cycle.discard(self)
# delete fanout bindings
for queue in self._fanout_queues:
if queue in self.auto_delete_queues:
self.queue_delete(queue)
self._close_clients()
super(Channel, self).close()
def _close_clients(self):
# Close connections
for attr in 'client', 'subclient':
try:
self.__dict__[attr].connection.disconnect()
except (KeyError, AttributeError, self.ResponseError):
pass
def _prepare_virtual_host(self, vhost):
if not isinstance(vhost, numbers.Integral):
if not vhost or vhost == '/':
vhost = DEFAULT_DB
elif vhost.startswith('/'):
vhost = vhost[1:]
try:
vhost = int(vhost)
except ValueError:
raise ValueError(
'Database is int between 0 and limit - 1, not {0}'.format(
vhost,
))
return vhost
def _connparams(self):
conninfo = self.connection.client
connparams = {'host': conninfo.hostname or '127.0.0.1',
'port': conninfo.port or DEFAULT_PORT,
'virtual_host': conninfo.virtual_host,
'password': conninfo.password,
'max_connections': self.max_connections,
'socket_timeout': self.socket_timeout}
host = connparams['host']
if '://' in host:
scheme, _, _, _, _, path, query = _parse_url(host)
if scheme == 'socket':
connparams.update({
'connection_class': redis.UnixDomainSocketConnection,
'path': '/' + path}, **query)
connparams.pop('host', None)
connparams.pop('port', None)
connparams['db'] = self._prepare_virtual_host(
connparams.pop('virtual_host', None))
channel = self
connection_cls = (
connparams.get('connection_class') or
redis.Connection
)
class Connection(connection_cls):
def disconnect(self):
channel._on_connection_disconnect(self)
super(Connection, self).disconnect()
connparams['connection_class'] = Connection
return connparams
def _create_client(self):
return self.Client(connection_pool=self.pool)
def _get_pool(self):
params = self._connparams()
self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
return redis.ConnectionPool(**params)
def _get_client(self):
if redis.VERSION < (2, 4, 4):
raise VersionMismatch(
'Redis transport requires redis-py versions 2.4.4 or later. '
'You have {0.__version__}'.format(redis))
        # KombuRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        # This was added after redis-py was changed.
class KombuRedis(redis.Redis): # pragma: no cover
def __init__(self, *args, **kwargs):
super(KombuRedis, self).__init__(*args, **kwargs)
self.connection = self.connection_pool.get_connection('_')
return KombuRedis
@contextmanager
def conn_or_acquire(self, client=None):
if client:
yield client
else:
if self._in_poll:
client = self._create_client()
try:
yield client
finally:
self.pool.release(client.connection)
else:
yield self.client
@property
def pool(self):
if self._pool is None:
self._pool = self._get_pool()
return self._pool
@cached_property
def client(self):
"""Client used to publish messages, BRPOP etc."""
return self._create_client()
@cached_property
def subclient(self):
"""Pub/Sub connection used to consume fanout queues."""
client = self._create_client()
pubsub = client.pubsub()
pool = pubsub.connection_pool
pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint)
return pubsub
def _update_cycle(self):
"""Update fair cycle between queues.
We cycle between queues fairly to make sure that
each queue is equally likely to be consumed from,
so that a very busy queue will not block others.
This works by using Redis's `BRPOP` command and
by rotating the most recently used queue to the
        end of the list.  See Kombu github issue #166 for
more discussion of this method.
"""
self._queue_cycle = list(self.active_queues)
def _consume_cycle(self):
"""Get a fresh list of queues from the queue cycle."""
active = len(self.active_queues)
return self._queue_cycle[0:active]
def _rotate_cycle(self, used):
"""Move most recently used queue to end of list."""
cycle = self._queue_cycle
try:
cycle.append(cycle.pop(cycle.index(used)))
except ValueError:
pass
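    # Worked example (editorial, not part of kombu): _rotate_cycle() moves the
    # queue that was just consumed from to the back of the cycle, so queues
    # that have waited longest are tried first on the next BRPOP.
    #
    #   self._queue_cycle = ['alpha', 'beta', 'gamma']
    #   self._rotate_cycle('beta')
    #   assert self._queue_cycle == ['alpha', 'gamma', 'beta']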
def _get_response_error(self):
from redis import exceptions
return exceptions.ResponseError
@property
def active_queues(self):
"""Set of queues being consumed from (excluding fanout queues)."""
return set(queue for queue in self._active_queues
if queue not in self.active_fanout_queues)
class Transport(virtual.Transport):
Channel = Channel
polling_interval = None # disable sleep between unsuccessful polls.
default_port = DEFAULT_PORT
supports_ev = True
driver_type = 'redis'
driver_name = 'redis'
def __init__(self, *args, **kwargs):
if redis is None:
raise ImportError('Missing redis library (pip install redis)')
super(Transport, self).__init__(*args, **kwargs)
# Get redis-py exceptions.
self.connection_errors, self.channel_errors = self._get_errors()
# All channels share the same poller.
self.cycle = MultiChannelPoller()
def driver_version(self):
return redis.__version__
def register_with_event_loop(self, connection, loop):
cycle = self.cycle
cycle.on_poll_init(loop.poller)
cycle_poll_start = cycle.on_poll_start
add_reader = loop.add_reader
on_readable = self.on_readable
def _on_disconnect(connection):
if connection._sock:
loop.remove(connection._sock)
cycle._on_connection_disconnect = _on_disconnect
def on_poll_start():
cycle_poll_start()
[add_reader(fd, on_readable, fd) for fd in cycle.fds]
loop.on_tick.add(on_poll_start)
loop.call_repeatedly(10, cycle.maybe_restore_messages)
def on_readable(self, fileno):
"""Handle AIO event for one of our file descriptors."""
item = self.cycle.on_readable(fileno)
if item:
message, queue = item
if not queue or queue not in self._callbacks:
raise KeyError(
'Message for queue {0!r} without consumers: {1}'.format(
queue, message))
self._callbacks[queue](message)
def _get_errors(self):
"""Utility to import redis-py's exceptions at runtime."""
return get_redis_error_classes()
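# Hypothetical usage sketch (added for illustration; the broker URL is made
# up): kombu selects this Transport when a connection uses the redis scheme.
#
#   from kombu import Connection
#   with Connection('redis://localhost:6379/0') as conn:
#       conn.connect()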
| agpl-3.0 | 7,035,478,193,859,772,000 | 33.77325 | 79 | 0.562714 | false |
ABaldwinHunter/django-clone | tests/forms_tests/tests/test_media.py | 76 | 23851 | # -*- coding: utf-8 -*-
from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils.encoding import force_text
@override_settings(
STATIC_URL='http://media.example.com/static/',
)
class FormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(
css={'all': ('path/to/css1', '/path/to/css2')},
js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'),
)
self.assertEqual(
str(m),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(
str(m3),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Media objects can be interrogated by media type
self.assertEqual(
str(w1.media['css']),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />"""
)
self.assertEqual(
str(w1.media['js']),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(
str(w1.media + w2.media + w3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Check that media addition hasn't affected the original objects
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(
str(w6.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(
str(w7.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(
str(w8.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(
str(w9.media),
"""<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(
str(w11.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(
str(w12.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(
str(multimedia.media),
"""<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(
str(mymulti.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(
str(f1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(
str(f1.media + f2.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(
str(f3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
)
# Media works in templates
self.assertEqual(
Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />"""
)
def test_html_safe(self):
media = Media(css={'all': ['/path/to/css']}, js=['/path/to/js'])
self.assertTrue(hasattr(Media, '__html__'))
self.assertEqual(force_text(media), media.__html__())
| bsd-3-clause | 2,379,364,266,218,648,000 | 44.691571 | 120 | 0.552094 | false |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/src/twisted/trial/_dist/test/test_workertrial.py | 12 | 5153 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.workertrial}.
"""
import errno
import sys
import os
from twisted.protocols.amp import AMP
from twisted.python.compat import _PY3, NativeStringIO as StringIO
from twisted.test.proto_helpers import StringTransport
from twisted.trial.unittest import TestCase
from twisted.trial._dist.workertrial import WorkerLogObserver, main, _setupPath
from twisted.trial._dist import (
workertrial, _WORKER_AMP_STDIN, _WORKER_AMP_STDOUT, workercommands,
managercommands)
class FakeAMP(AMP):
"""
A fake amp protocol.
"""
class WorkerLogObserverTests(TestCase):
"""
Tests for L{WorkerLogObserver}.
"""
def test_emit(self):
"""
L{WorkerLogObserver} forwards data to L{managercommands.TestWrite}.
"""
calls = []
class FakeClient(object):
def callRemote(self, method, **kwargs):
calls.append((method, kwargs))
observer = WorkerLogObserver(FakeClient())
observer.emit({'message': ['Some log']})
self.assertEqual(
calls, [(managercommands.TestWrite, {'out': 'Some log'})])
class MainTests(TestCase):
"""
Tests for L{main}.
"""
def setUp(self):
self.readStream = StringIO()
self.writeStream = StringIO()
self.patch(workertrial, 'startLoggingWithObserver',
self.startLoggingWithObserver)
self.addCleanup(setattr, sys, "argv", sys.argv)
sys.argv = ["trial"]
def fdopen(self, fd, mode=None):
"""
Fake C{os.fdopen} implementation which returns C{self.readStream} for
the stdin fd and C{self.writeStream} for the stdout fd.
"""
if fd == _WORKER_AMP_STDIN:
self.assertIdentical(None, mode)
return self.readStream
elif fd == _WORKER_AMP_STDOUT:
self.assertEqual('w', mode)
return self.writeStream
else:
raise AssertionError("Unexpected fd %r" % (fd,))
def startLoggingWithObserver(self, emit, setStdout):
"""
        Override C{startLoggingWithObserver} so that logging is not actually started.
"""
self.assertFalse(setStdout)
def test_empty(self):
"""
If no data is ever written, L{main} exits without writing data out.
"""
main(self.fdopen)
self.assertEqual('', self.writeStream.getvalue())
def test_forwardCommand(self):
"""
L{main} forwards data from its input stream to a L{WorkerProtocol}
instance which writes data to the output stream.
"""
client = FakeAMP()
clientTransport = StringTransport()
client.makeConnection(clientTransport)
client.callRemote(workercommands.Run, testCase=b"doesntexist")
self.readStream = clientTransport.io
self.readStream.seek(0, 0)
main(self.fdopen)
self.assertIn(
"No module named 'doesntexist'", self.writeStream.getvalue())
if _PY3:
test_forwardCommand.skip = "Does not work on Python 3 (https://tm.tl/8944)"
def test_readInterrupted(self):
"""
If reading the input stream fails with a C{IOError} with errno
C{EINTR}, L{main} ignores it and continues reading.
"""
excInfos = []
class FakeStream(object):
count = 0
def read(oself, size):
oself.count += 1
if oself.count == 1:
raise IOError(errno.EINTR)
else:
excInfos.append(sys.exc_info())
return ''
self.readStream = FakeStream()
main(self.fdopen)
self.assertEqual('', self.writeStream.getvalue())
self.assertEqual([(None, None, None)], excInfos)
def test_otherReadError(self):
"""
L{main} only ignores C{IOError} with C{EINTR} errno: otherwise, the
error pops out.
"""
class FakeStream(object):
count = 0
def read(oself, size):
oself.count += 1
if oself.count == 1:
raise IOError("Something else")
return ''
self.readStream = FakeStream()
self.assertRaises(IOError, main, self.fdopen)
class SetupPathTests(TestCase):
"""
Tests for L{_setupPath} C{sys.path} manipulation.
"""
def setUp(self):
self.addCleanup(setattr, sys, "path", sys.path[:])
def test_overridePath(self):
"""
L{_setupPath} overrides C{sys.path} if B{TRIAL_PYTHONPATH} is specified
in the environment.
"""
environ = {"TRIAL_PYTHONPATH": os.pathsep.join(["foo", "bar"])}
_setupPath(environ)
self.assertEqual(["foo", "bar"], sys.path)
def test_noVariable(self):
"""
L{_setupPath} doesn't change C{sys.path} if B{TRIAL_PYTHONPATH} is not
present in the environment.
"""
originalPath = sys.path[:]
_setupPath({})
self.assertEqual(originalPath, sys.path)
| mit | -2,294,088,270,421,492,000 | 26.704301 | 83 | 0.590142 | false |
samtx/whatsmyrankine | venv/lib/python2.7/site-packages/flask/views.py | 782 | 5642 | # -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
    #: A list of methods this pluggable view can handle.
methods = None
#: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view().  However, this moves parts of the
    #: logic from the class declaration to the place where it's hooked
    #: into the routing system.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = []
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# we attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key in d:
if key in http_method_funcs:
methods.add(key.upper())
# if we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the baseclass or another subclass of a base method view
# that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
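# Illustrative sketch (not part of Flask itself): with MethodViewType, a
# subclass that only defines handler methods gets its ``methods`` list for
# free, without declaring it by hand.
#
#   class ItemAPI(with_metaclass(MethodViewType, View)):
#       def get(self):
#           return 'read'
#       def post(self):
#           return 'write'
#
#   ItemAPI.methods  # -> ['GET', 'POST']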
class MethodView(with_metaclass(MethodViewType, View)):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
:meth:`get` it means you will response to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# if the request method is HEAD and we don't have a handler for it
# retry with GET
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
| mit | 1,829,622,443,447,455,200 | 36.865772 | 78 | 0.61698 | false |
alxgu/ansible | lib/ansible/plugins/lookup/credstash.py | 43 | 4385 | # (c) 2015, Ensighten <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: credstash
version_added: "2.0"
short_description: retrieve secrets from Credstash on AWS
requirements:
- credstash (python library)
description:
- "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
options:
_terms:
        description: term or list of terms to look up in the credential store
type: list
required: True
table:
description: name of the credstash table to query
default: 'credential-store'
required: True
version:
description: Credstash version
region:
description: AWS region
profile_name:
description: AWS profile to use for authentication
env:
- name: AWS_PROFILE
aws_access_key_id:
description: AWS access key ID
env:
- name: AWS_ACCESS_KEY_ID
aws_secret_access_key:
description: AWS access key
env:
- name: AWS_SECRET_ACCESS_KEY
aws_session_token:
description: AWS session token
env:
- name: AWS_SESSION_TOKEN
"""
EXAMPLES = """
- name: first use credstash to store your secrets
shell: credstash put my-github-password secure123
- name: "Test credstash lookup plugin -- get my github password"
debug: msg="Credstash lookup! {{ lookup('credstash', 'my-github-password') }}"
- name: "Test credstash lookup plugin -- get my other password from us-west-1"
debug: msg="Credstash lookup! {{ lookup('credstash', 'my-other-password', region='us-west-1') }}"
- name: "Test credstash lookup plugin -- get the company's github password"
debug: msg="Credstash lookup! {{ lookup('credstash', 'company-github-password', table='company-passwords') }}"
- name: Example play using the 'context' feature
hosts: localhost
vars:
context:
app: my_app
environment: production
tasks:
- name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
debug: msg="{{ lookup('credstash', 'some-password', context=context) }}"
- name: "Test credstash lookup plugin -- get the password with a context defined here"
debug: msg="{{ lookup('credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
"""
RETURN = """
_raw:
description:
- value(s) stored in Credstash
"""
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
CREDSTASH_INSTALLED = False
try:
import credstash
CREDSTASH_INSTALLED = True
except ImportError:
CREDSTASH_INSTALLED = False
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not CREDSTASH_INSTALLED:
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
ret = []
for term in terms:
try:
version = kwargs.pop('version', '')
region = kwargs.pop('region', None)
table = kwargs.pop('table', 'credential-store')
profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
except Exception as e:
raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
ret.append(val)
return ret
| gpl-3.0 | 7,706,076,775,639,221,000 | 35.848739 | 124 | 0.639453 | false |
jgcaaprom/android_external_chromium_org | third_party/closure_linter/closure_linter/errors.py | 99 | 4184 | #!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
def ByName(name):
"""Get the error code for the given error name.
Args:
name: The name of the error
Returns:
The error code
"""
return globals()[name]
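# Example usage (editorial note, not part of the original file): ByName() maps
# a constant's name to the numeric error code defined below.
#
#   ByName('MISSING_SEMICOLON')  # -> 10
#   ByName('LINE_TOO_LONG')      # -> 110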
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
# TODO(robbyw): Split this into more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
# Errors added after 2.3.9:
JSDOC_MISSING_VAR_ARGS_TYPE,
JSDOC_MISSING_VAR_ARGS_NAME,
# Errors added after 2.3.13:
])
| bsd-3-clause | -8,520,601,316,505,334,000 | 27.462585 | 74 | 0.756453 | false |
teltek/edx-platform | common/djangoapps/third_party_auth/migrations/0019_consolidate_slug.py | 14 | 2234 | # -*- coding: utf-8 -*-
"""
Custom migration script to add slug field to all ProviderConfig models.
"""
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.text import slugify
def fill_slug_field(apps, schema_editor):
"""
    Fill in the slug field for each ProviderConfig class for backwards compatibility.
"""
OAuth2ProviderConfig = apps.get_model('third_party_auth', 'OAuth2ProviderConfig')
SAMLProviderConfig = apps.get_model('third_party_auth', 'SAMLProviderConfig')
LTIProviderConfig = apps.get_model('third_party_auth', 'LTIProviderConfig')
for config in OAuth2ProviderConfig.objects.all():
config.slug = config.provider_slug
config.save()
for config in SAMLProviderConfig.objects.all():
config.slug = config.idp_slug
config.save()
for config in LTIProviderConfig.objects.all():
config.slug = slugify(config.lti_consumer_key)
config.save()
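# Editorial example (not part of the original migration): for LTI providers the
# slug is derived from the consumer key via slugify, e.g.
#
#   slugify('Example LTI Key 1')  # -> 'example-lti-key-1'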
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0018_auto_20180327_1631'),
]
operations = [
migrations.AddField(
model_name='ltiproviderconfig',
name='slug',
            field=models.SlugField(default=b'default', help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
),
migrations.AddField(
model_name='oauth2providerconfig',
name='slug',
            field=models.SlugField(default=b'default', help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
),
migrations.AddField(
model_name='samlproviderconfig',
name='slug',
            field=models.SlugField(default=b'default', help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30),
),
migrations.RunPython(fill_slug_field, reverse_code=migrations.RunPython.noop),
]
| agpl-3.0 | -2,491,701,506,592,241,700 | 39.618182 | 227 | 0.675022 | false |
Beauhurst/django | django/db/models/options.py | 6 | 34598 | import copy
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import Manager
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'indexes',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
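# Illustrative examples (not part of Django's source) of the normalization
# performed by normalize_together() above:
#
#   normalize_together(())                          -> ()
#   normalize_together(('first', 'last'))           -> (('first', 'last'),)
#   normalize_together([('first', 'last'), ['a']])  -> (('first', 'last'), ('a',))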
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
if not field.remote_field.parent_link:
raise ImproperlyConfigured(
'Add parent_link=True to %s.' % field,
)
else:
auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (self.app_label, self.model_name)
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
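    # Illustrative sketch (names are hypothetical except AUTH_USER_MODEL):
    #
    #   class User(models.Model):
    #       class Meta:
    #           swappable = 'AUTH_USER_MODEL'
    #
    #   # With AUTH_USER_MODEL pointing at this model  -> User._meta.swapped is None
    #   # With AUTH_USER_MODEL = 'myapp.Member'        -> User._meta.swapped == 'myapp.Member'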
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not private or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
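    # Illustrative note (not part of Django): for a hypothetical multi-table
    # inheritance chain ``GrandParent -> Parent -> Child``,
    # ``Child._meta.get_base_chain(GrandParent)`` returns
    # ``[Parent, GrandParent]``, ordered from closest to most distant ancestor.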
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent's
                # link.
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
related_objects_graph[f.remote_field.model._meta.concrete_model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Return a list of fields associated with the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
            # To avoid list manipulation, always return a shallow copy
            # of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if getattr(obj, 'parent_link', False) and obj.model != self.concrete_model:
continue
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.private_fields
)
        # To avoid list manipulation, always
        # return a shallow copy of the results.
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
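    # Illustrative note (not part of Django): the cached properties earlier in
    # this class call the helper above as, for example,
    #   self._get_fields(reverse=False)                       # forward fields
    #   self._get_fields(forward=False, include_hidden=True)  # reverse relations
    # so most callers never pass seen_models themselves.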
@property
def has_auto_field(self):
warnings.warn(
'Model._meta.has_auto_field is deprecated in favor of checking if '
'Model._meta.auto_field is not None.',
RemovedInDjango21Warning, stacklevel=2
)
return self.auto_field is not None
@has_auto_field.setter
def has_auto_field(self, value):
pass
@cached_property
def _property_names(self):
"""
Return a set of the names of the properties defined on the model.
Internal helper for model initialization.
"""
return frozenset({
attr for attr in
dir(self.model) if isinstance(getattr(self.model, attr), property)
})
| bsd-3-clause | 1,516,432,898,110,203,000 | 40.286396 | 115 | 0.596769 | false |
melonproject/oyente | oyente/batch_run.py | 3 | 1356 | import json
import glob
from tqdm import tqdm
import os
import sys
import urllib2
contract_dir = 'contract_data'
cfiles = glob.glob(contract_dir+'/contract1.json')
cjson = {}
print "Loading contracts..."
for cfile in tqdm(cfiles):
cjson.update(json.loads(open(cfile).read()))
results = {}
missed = []
print "Running analysis..."
contracts = cjson.keys()
if os.path.isfile('results.json'):
old_res = json.loads(open('results.json').read())
old_res = old_res.keys()
contracts = [c for c in contracts if c not in old_res]
cores=0
job=0
if len(sys.argv)>=3:
cores = int(sys.argv[1])
job = int(sys.argv[2])
contracts = contracts[(len(contracts)/cores)*job:(len(contracts)/cores)*(job+1)]
print "Job %d: Running on %d contracts..." % (job, len(contracts))
for c in tqdm(contracts):
with open('tmp.evm','w') as of:
of.write(cjson[c][1][2:])
os.system('python oyente.py -ll 30 -s tmp.evm -j -b')
try:
results[c] = json.loads(open('tmp.evm.json').read())
except:
missed.append(c)
with open('results.json', 'w') as of:
of.write(json.dumps(results,indent=1))
with open('missed.json', 'w') as of:
of.write(json.dumps(missed,indent=1))
# urllib2.urlopen('https://dweet.io/dweet/for/oyente-%d-%d?completed=%d&missed=%d&remaining=%d' % (job,cores,len(results),len(missed),len(contracts)-len(results)-len(missed)))
print "Completed."
| gpl-3.0 | 7,615,481,742,327,639,000 | 24.111111 | 176 | 0.678466 | false |
xukunfeng/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/deps.py | 216 | 3993 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import genmsg.msg_loader
import genmsg
# pkg_name - string
# msg_file - string full path
# search_paths - dict of {'pkg':'msg_dir'}
def find_msg_dependencies_with_type(pkg_name, msg_file, search_paths):
# Read and parse the source msg file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
spec = genmsg.msg_loader.load_msg_from_file(msg_context, msg_file, full_type_name)
try:
genmsg.msg_loader.load_depends(msg_context, spec, search_paths)
except genmsg.InvalidMsgSpec as e:
raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(full_type_name, str(e)))
deps = set()
for dep_type_name in msg_context.get_all_depends(full_type_name):
deps.add((dep_type_name, msg_context.get_file(dep_type_name)))
return list(deps)
def find_msg_dependencies(pkg_name, msg_file, search_paths):
deps = find_msg_dependencies_with_type(pkg_name, msg_file, search_paths)
return [d[1] for d in deps]
def find_srv_dependencies_with_type(pkg_name, msg_file, search_paths):
# Read and parse the source msg file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(pkg_name, os.path.basename(msg_file))
spec = genmsg.msg_loader.load_srv_from_file(msg_context, msg_file, full_type_name)
try:
genmsg.msg_loader.load_depends(msg_context, spec, search_paths)
except genmsg.InvalidMsgSpec as e:
raise genmsg.MsgGenerationException("Cannot read .msg for %s: %s"%(full_type_name, str(e)))
deps = set()
for dep_type_name in msg_context.get_all_depends(spec.request.full_name):
deps.add((dep_type_name, msg_context.get_file(dep_type_name)))
for dep_type_name in msg_context.get_all_depends(spec.response.full_name):
deps.add((dep_type_name, msg_context.get_file(dep_type_name)))
return list(deps)
def find_srv_dependencies(pkg_name, msg_file, search_paths):
deps = find_srv_dependencies_with_type(pkg_name, msg_file, search_paths)
return [d[1] for d in deps]
#paths = {'std_msgs':'/u/mkjargaard/repositories/mkjargaard/dist-sandbox/std_msgs/msg'}
#file = '/u/mkjargaard/repositories/mkjargaard/dist-sandbox/quux_msgs/msg/QuuxString.msg'
#find_msg_dependencies('quux_msgs', file, paths)
| gpl-3.0 | -5,715,704,955,319,090,000 | 41.478723 | 99 | 0.736789 | false |
jeasoft/odoo | addons/account_followup/__openerp__.py | 261 | 2938 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payment Follow-up Management',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Module to automate letters for unpaid invoices, with multi-level recalls.
=========================================================================
You can define your multiple levels of recall through the menu:
---------------------------------------------------------------
Configuration / Follow-up / Follow-up Levels
Once it is defined, you can automatically print recalls every day by simply clicking on the menu:
------------------------------------------------------------------------------------------------------
Payment Follow-Up / Send Email and letters
It will generate a PDF / send emails / set manual actions according to the different levels
of recall defined. You can define different policies for different companies.
Note that if you want to check the follow-up level for a given partner/account entry, you can do so from the menu:
------------------------------------------------------------------------------------------------------------------
    Reporting / Accounting / Follow-ups Analysis
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['account_accountant', 'mail'],
'data': [
'security/account_followup_security.xml',
'security/ir.model.access.csv',
'report/account_followup_report.xml',
'account_followup_data.xml',
'account_followup_view.xml',
'account_followup_customers.xml',
'wizard/account_followup_print_view.xml',
'res_config_view.xml',
'views/report_followup.xml',
'account_followup_reports.xml'
],
'demo': ['account_followup_demo.xml'],
'test': [
'test/account_followup.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,327,289,274,687,465,500 | 41.57971 | 114 | 0.566031 | false |
xiandiancloud/ji | lms/djangoapps/bulk_email/models.py | 24 | 9630 | """
Models for bulk email
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration bulk_email --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/bulk_email/migrations/
"""
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, transaction
from html_to_text import html_to_text
from mail_utils import wrap_message
from xmodule_django.models import CourseKeyField
log = logging.getLogger(__name__)
# Bulk email to_options - the send to options that users can
# select from when they send email.
SEND_TO_MYSELF = 'myself'
SEND_TO_STAFF = 'staff'
SEND_TO_ALL = 'all'
TO_OPTIONS = [SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_ALL]
class Email(models.Model):
"""
Abstract base class for common information for an email.
"""
sender = models.ForeignKey(User, default=1, blank=True, null=True)
slug = models.CharField(max_length=128, db_index=True)
subject = models.CharField(max_length=128, blank=True)
html_message = models.TextField(null=True, blank=True)
text_message = models.TextField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta: # pylint: disable=C0111
abstract = True
class CourseEmail(Email):
"""
Stores information for an email to a course.
"""
# Three options for sending that we provide from the instructor dashboard:
# * Myself: This sends an email to the staff member that is composing the email.
#
# * Staff and instructors: This sends an email to anyone in the staff group and
# anyone in the instructor group
#
# * All: This sends an email to anyone enrolled in the course, with any role
# (student, staff, or instructor)
#
TO_OPTION_CHOICES = (
(SEND_TO_MYSELF, 'Myself'),
(SEND_TO_STAFF, 'Staff and instructors'),
(SEND_TO_ALL, 'All')
)
course_id = CourseKeyField(max_length=255, db_index=True)
to_option = models.CharField(max_length=64, choices=TO_OPTION_CHOICES, default=SEND_TO_MYSELF)
def __unicode__(self):
return self.subject
@classmethod
def create(cls, course_id, sender, to_option, subject, html_message, text_message=None):
"""
Create an instance of CourseEmail.
The CourseEmail.save_now method makes sure the CourseEmail entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# automatically generate the stripped version of the text from the HTML markup:
if text_message is None:
text_message = html_to_text(html_message)
# perform some validation here:
if to_option not in TO_OPTIONS:
fmt = 'Course email being sent to unrecognized to_option: "{to_option}" for "{course}", subject "{subject}"'
msg = fmt.format(to_option=to_option, course=course_id, subject=subject)
raise ValueError(msg)
# create the task, then save it immediately:
course_email = cls(
course_id=course_id,
sender=sender,
to_option=to_option,
subject=subject,
html_message=html_message,
text_message=text_message,
)
course_email.save_now()
return course_email
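    # Illustrative usage (not part of the original module); course_id and the
    # sending user below are hypothetical:
    #
    #   email = CourseEmail.create(
    #       course_id, request.user, SEND_TO_STAFF,
    #       subject="Midterm update",
    #       html_message="<p>See the course page.</p>",
    #   )
    #
    # text_message is derived from html_message automatically when omitted.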
@transaction.autocommit
def save_now(self):
"""
Writes CourseEmail immediately, ensuring the transaction is committed.
Autocommit annotation makes sure the database entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, this autocommit here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
self.save()
class Optout(models.Model):
"""
Stores users that have opted out of receiving emails from a course.
"""
# Allowing null=True to support data migration from email->user.
# We need to first create the 'user' column with some sort of default in order to run the data migration,
# and given the unique index, 'null' is the best default value.
user = models.ForeignKey(User, db_index=True, null=True)
course_id = CourseKeyField(max_length=255, db_index=True)
class Meta: # pylint: disable=C0111
unique_together = ('user', 'course_id')
# Defines the tag that must appear in a template, to indicate
# the location where the email message body is to be inserted.
COURSE_EMAIL_MESSAGE_BODY_TAG = '{{message_body}}'
class CourseEmailTemplate(models.Model):
"""
Stores templates for all emails to a course to use.
This is expected to be a singleton, to be shared across all courses.
Initialization takes place in a migration that in turn loads a fixture.
The admin console interface disables add and delete operations.
Validation is handled in the CourseEmailTemplateForm class.
"""
html_template = models.TextField(null=True, blank=True)
plain_template = models.TextField(null=True, blank=True)
@staticmethod
def get_template():
"""
Fetch the current template
If one isn't stored, an exception is thrown.
"""
try:
return CourseEmailTemplate.objects.get()
except CourseEmailTemplate.DoesNotExist:
log.exception("Attempting to fetch a non-existent course email template")
raise
@staticmethod
def _render(format_string, message_body, context):
"""
Create a text message using a template, message body and context.
Convert message body (`message_body`) into an email message
using the provided template. The template is a format string,
which is rendered using format() with the provided `context` dict.
This doesn't insert user's text into template, until such time we can
support proper error handling due to errors in the message body
(e.g. due to the use of curly braces).
Instead, for now, we insert the message body *after* the substitutions
have been performed, so that anything in the message body that might
interfere will be innocently returned as-is.
Output is returned as a unicode string. It is not encoded as utf-8.
Such encoding is left to the email code, which will use the value
of settings.DEFAULT_CHARSET to encode the message.
"""
# If we wanted to support substitution, we'd call:
# format_string = format_string.replace(COURSE_EMAIL_MESSAGE_BODY_TAG, message_body)
result = format_string.format(**context)
# Note that the body tag in the template will now have been
# "formatted", so we need to do the same to the tag being
# searched for.
message_body_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()
result = result.replace(message_body_tag, message_body, 1)
# finally, return the result, after wrapping long lines and without converting to an encoded byte array.
return wrap_message(result)
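    # Illustrative example (not part of the original module) of the ordering
    # described above: the context is formatted first, then the message body is
    # inserted verbatim, so braces typed by the author are left untouched.
    #
    #   CourseEmailTemplate._render(u"Dear {name},\n{{message_body}}",
    #                               u"Totals: {90%}", {'name': u'Alice'})
    #   # -> u"Dear Alice,\nTotals: {90%}" (then line-wrapped)
    #
    # The template text and context values are assumptions for illustration.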
def render_plaintext(self, plaintext, context):
"""
Create plain text message.
Convert plain text body (`plaintext`) into plaintext email message using the
stored plain template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.plain_template, plaintext, context)
def render_htmltext(self, htmltext, context):
"""
Create HTML text message.
Convert HTML text body (`htmltext`) into HTML email message using the
stored HTML template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.html_template, htmltext, context)
class CourseAuthorization(models.Model):
"""
Enable the course email feature on a course-by-course basis.
"""
# The course that these features are attached to.
course_id = CourseKeyField(max_length=255, db_index=True, unique=True)
# Whether or not to enable instructor email
email_enabled = models.BooleanField(default=False)
@classmethod
def instructor_email_enabled(cls, course_id):
"""
Returns whether or not email is enabled for the given course id.
If email has not been explicitly enabled, returns False.
"""
# If settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] is
# set to False, then we enable email for every course.
if not settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH']:
return True
try:
record = cls.objects.get(course_id=course_id)
return record.email_enabled
except cls.DoesNotExist:
return False
def __unicode__(self):
not_en = "Not "
if self.email_enabled:
not_en = ""
# pylint: disable=no-member
return u"Course '{}': Instructor Email {}Enabled".format(self.course_id.to_deprecated_string(), not_en)
| agpl-3.0 | -5,084,161,098,711,893,000 | 37.063241 | 120 | 0.672586 | false |
yipenggao/moose | python/chigger/misc/AxisSource.py | 6 | 1994 | #pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
from .. import base
from .. import utils
class AxisSource(base.ChiggerFilterSourceBase):
"""
    Creates an Axis source for use with the ColorBar.
"""
VTKACTOR_TYPE = vtk.vtkContextActor
@staticmethod
def getOptions():
opt = base.ChiggerFilterSourceBase.getOptions()
opt += utils.AxisOptions.get_options()
return opt
def __init__(self, **kwargs):
super(AxisSource, self).__init__(vtkactor_type=vtk.vtkContextActor, vtkmapper_type=None,
**kwargs)
self._vtksource = vtk.vtkAxis()
self._vtkactor.GetScene().AddItem(self._vtksource)
def getVTKSource(self):
"""
Return the vtkAxis object.
"""
return self._vtksource
def update(self, **kwargs):
"""
Update the vtkAxis with given settings. (override)
Inputs:
see ChiggerFilterSourceBase
"""
super(AxisSource, self).update(**kwargs)
utils.AxisOptions.set_options(self._vtksource, self._options)
self._vtksource.Update()
| lgpl-2.1 | 5,896,478,575,260,118,000 | 35.925926 | 96 | 0.467402 | false |
gfyoung/pandas | pandas/tests/indexes/common.py | 2 | 28221 | import gc
from typing import Type
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
isna,
)
import pandas._testing as tm
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
class Base:
""" base class for index sub-class tests """
_holder: Type[Index]
_compat_props = ["shape", "ndim", "size", "nbytes"]
def create_index(self) -> Index:
raise NotImplementedError("Method not implemented")
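    # Illustrative only (not part of pandas): a concrete subclass of this
    # harness would typically look like the hypothetical example below.
    #
    #   class TestMyIndex(Base):
    #       _holder = Int64Index
    #
    #       def create_index(self) -> Index:
    #           return Int64Index(np.arange(5))
    #
    # Every test defined on Base then runs against that index type.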
def test_pickle_compat_construction(self):
# need an object to create with
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)"
)
with pytest.raises(TypeError, match=msg):
self._holder()
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = (
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(idx).__name__}"
)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_constructor_name_unhashable(self):
# GH#29069 check that name is hashable
# See also same-named test in tests.series.test_constructors
idx = self.create_index()
with pytest.raises(TypeError, match="Index.name must be a hashable type"):
type(idx)(idx, name=[])
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = "foo"
result = Index(expected)
tm.assert_index_equal(result, expected)
result = Index(expected, name="bar")
expected.name = "bar"
tm.assert_index_equal(result, expected)
else:
expected.names = ["foo", "bar"]
result = Index(expected)
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["foo", "bar"],
),
)
result = Index(expected, names=["A", "B"])
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["A", "B"],
),
)
def test_numeric_compat(self):
idx = self.create_index()
# Check that this doesn't cover MultiIndex case, if/when it does,
# we can remove multi.test_compat.test_numeric_compat
assert not isinstance(idx, MultiIndex)
if type(idx) is Index:
return
typ = type(idx._data).__name__
lmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",
"cannot perform (__mul__|__truediv__|__floordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=lmsg):
idx * 1
rmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",
"cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=rmsg):
1 * idx
div_err = lmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = rmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
1 / idx
floordiv_err = lmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
idx // 1
floordiv_err = rmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform all"):
idx.all()
with pytest.raises(TypeError, match="cannot perform any"):
idx.any()
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match="Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_get_indexer_consistency(self, index):
# See GH 16819
if isinstance(index, IntervalIndex):
# requires index.is_non_overlapping
return
if index.is_unique:
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
def test_copy_name(self, index):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
if isinstance(index, MultiIndex):
return
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self, index):
# gh-35592
if isinstance(index, MultiIndex):
return
assert index.copy(name="mario").name == "mario"
with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
index.copy(name=["mario", "luigi"])
msg = f"{type(index).__name__}.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
index.copy(name=[["mario"]])
def test_copy_dtype_deprecated(self, index):
# GH35853
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
index.copy(dtype=object)
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs["freq"] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
return
index_type = type(index)
result = index_type(index.values, copy=True, **init_kwargs)
if is_datetime64tz_dtype(index.dtype):
result = result.tz_localize("UTC").tz_convert(index.tz)
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
index = index._with_freq(None)
tm.assert_index_equal(index, result)
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values, check_same="same")
def test_memory_usage(self, index):
index._engine.clear_mapping()
result = index.memory_usage()
if index.empty:
# we report 0 for no-length
assert result == 0
return
# non-zero length
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == "object":
assert result3 > result2
def test_argsort(self, request, index):
# separately tested
if isinstance(index, CategoricalIndex):
return
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self, index):
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(index), (CategoricalIndex, RangeIndex)):
# TODO: why type(index)?
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, kind="mergesort")
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
def test_repeat(self):
rep = 2
i = self.create_index()
expected = Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
if isinstance(i, (pd.DatetimeIndex, pd.TimedeltaIndex)):
# where does not preserve freq
i = i._with_freq(None)
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert_base(self, index):
result = index[1:4]
if not len(index):
return
# test 0th element
assert index[0:4].equals(result.insert(0, index[0]))
def test_delete_base(self, index):
if not len(index):
return
if isinstance(index, RangeIndex):
# tested in class
return
expected = index[1:]
result = index.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = index[:-1]
result = index.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
length = len(index)
msg = f"index {length} is out of bounds for axis 0 with size {length}"
with pytest.raises(IndexError, match=msg):
index.delete(length)
def test_equals(self, index):
if isinstance(index, IntervalIndex):
# IntervalIndex tested separately, the index.equals(index.astype(object))
# fails for IntervalIndex
return
assert index.equals(index)
assert index.equals(index.copy())
assert index.equals(index.astype(object))
assert not index.equals(list(index))
assert not index.equals(np.array(index))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(index, RangeIndex):
same_values = Index(index, dtype=object)
assert index.equals(same_values)
assert same_values.equals(index)
if index.nlevels == 1:
# do not test MultiIndex
assert not index.equals(Series(index))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
# For RangeIndex we can convert to Int64Index
tm.assert_series_equal(series_a == item, Series(expected3))
def test_format(self):
# GH35439
idx = self.create_index()
expected = [str(x) for x in idx]
assert idx.format() == expected
def test_format_empty(self):
# GH35712
empty_idx = self._holder([])
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
def test_hasnans_isnans(self, index):
# GH 11343, added tests for hasnans / isnans
if isinstance(index, MultiIndex):
return
# cases in indices doesn't include NaN
idx = index.copy(deep=True)
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if len(index) == 0:
return
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy(deep=True)
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self, index):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
if len(index) == 0:
tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
elif not index.hasnans:
tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
def test_join_self_unique(self, join_type):
index = self.create_index()
if index.is_unique:
joined = index.join(index, how=join_type)
assert (index == joined).all()
def test_map(self):
# callable
index = self.create_index()
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype("int64")
else:
expected = index
result = index.map(lambda x: x)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, mapper):
index = self.create_index()
if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip(f"skipping tests for {type(index)}")
identity = mapper(index.values, index)
# we don't infer to UInt64 for a dict
if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):
expected = index.astype("int64")
else:
expected = index
result = index.map(identity)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
# empty mappable
expected = Index([np.nan] * len(index))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
def test_map_str(self):
# GH 31202
index = self.create_index()
result = index.map(str)
expected = Index([str(x) for x in index], dtype=object)
tm.assert_index_equal(result, expected)
def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
fill = index[0]
msg = "putmask: mask and data must be the same size"
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) + 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) - 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask("foo", fill)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(self, copy, name, ordered):
# GH 18630
index = self.create_index()
if name:
index = index.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, ordered=ordered)
tm.assert_index_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, dtype=dtype)
tm.assert_index_equal(result, expected)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = index.astype("category", copy=copy)
expected = CategoricalIndex(index.values, name=name)
tm.assert_index_equal(result, expected)
def test_is_unique(self):
# initialize a unique index
index = self.create_index().drop_duplicates()
assert index.is_unique is True
# empty index should be unique
index_empty = index[:0]
assert index_empty.is_unique is True
# test basic dupes
index_dup = index.insert(0, index[0])
assert index_dup.is_unique is False
# single NA should be unique
index_na = index.insert(0, np.nan)
assert index_na.is_unique is True
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
@pytest.mark.arm_slow
def test_engine_reference_cycle(self):
# GH27585
index = self.create_index()
nrefs_pre = len(gc.get_referrers(index))
index._engine
assert len(gc.get_referrers(index)) == nrefs_pre
def test_getitem_2d_deprecated(self):
# GH#30588
idx = self.create_index()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
res = idx[:, None]
assert isinstance(res, np.ndarray), type(res)
def test_contains_requires_hashable_raises(self):
idx = self.create_index()
msg = "unhashable type: 'list'"
with pytest.raises(TypeError, match=msg):
[] in idx
msg = "|".join(
[
r"unhashable type: 'dict'",
r"must be real number, not dict",
r"an integer is required",
r"\{\}",
r"pandas\._libs\.interval\.IntervalTree' is not iterable",
]
)
with pytest.raises(TypeError, match=msg):
{} in idx._engine
def test_copy_shares_cache(self):
# GH32898, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
assert copy._cache is idx._cache
def test_shallow_copy_shares_cache(self):
# GH32669, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._shallow_copy()
assert shallow_copy._cache is idx._cache
shallow_copy = idx._shallow_copy(idx._data)
assert shallow_copy._cache is not idx._cache
assert shallow_copy._cache == {}
| bsd-3-clause | 2,308,540,821,060,143,000 | 33.415854 | 87 | 0.564402 | false |
hyqneuron/pylearn2-maxsom | pylearn2/scripts/papers/maxout/mytests/mytest4.py | 1 | 8009 | from pylearn2.models.mlp import MLP
from pylearn2.models.maxout import Maxout
from pylearn2.training_algorithms.sgd import SGD
import logging
import warnings
import sys
import numpy as np
from theano.compat import six
from theano import config
from theano import function
from theano.gof.op import get_debug_values
import theano.tensor as T
from pylearn2.compat import OrderedDict, first_key
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, NullSpace
from pylearn2.train_extensions import TrainExtension
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.learning_rule import MomentumAdjustor \
as LRMomentumAdjustor
from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size
from pylearn2.utils import py_integer_types, py_float_types
from pylearn2.utils import safe_zip
from pylearn2.utils import serial
from pylearn2.utils import sharedX
from pylearn2.utils import contains_nan
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.timing import log_timing
from pylearn2.utils.rng import make_np_rng
log = logging.getLogger(__name__)
class TestAlgo(SGD):
# this train function mainly to hack into weight tracking
def train(self, dataset):
"""
Runs one epoch of SGD training on the specified dataset.
Parameters
----------
dataset : Dataset
"""
self.first = False
rng = self.rng
if not is_stochastic(self.train_iteration_mode):
rng = None
data_specs = self.cost.get_data_specs(self.model)
# The iterator should be built from flat data specs, so it returns
# flat, non-redundent tuples of data.
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
if len(space_tuple) == 0:
# No data will be returned by the iterator, and it is impossible
# to know the size of the actual batch.
# It is not decided yet what the right thing to do should be.
raise NotImplementedError("Unable to train with SGD, because "
"the cost does not actually use data from the data set. "
"data_specs: %s" % str(data_specs))
flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
iterator = dataset.iterator(mode=self.train_iteration_mode,
batch_size=self.batch_size,
data_specs=flat_data_specs, return_tuple=True,
rng = rng, num_batches = self.batches_per_iter)
"""
if not hasattr(self, 'batch_count'):
self.batch_count=0
self.param_records=[]
print "Going into first batch"
param_init = self.model.get_param_values()
"""
on_load_batch = self.on_load_batch
for batch in iterator:
for callback in on_load_batch:
callback(*batch)
self.sgd_update(*batch)
# iterator might return a smaller batch if dataset size
# isn't divisible by batch_size
# Note: if data_specs[0] is a NullSpace, there is no way to know
# how many examples would actually have been in the batch,
# since it was empty, so actual_batch_size would be reported as 0.
actual_batch_size = flat_data_specs[0].np_batch_size(batch)
self.monitor.report_batch(actual_batch_size)
for callback in self.update_callbacks:
callback(self)
"""
param_first = self.model.get_param_values()
with log_timing(log, "Saving initial param and first param"):
serial.save("param_init_first.pkl", (param_init, param_first))
sys.exit(0)
# Now, we record the weights every 50 minibatches
# So 10 records per epoch
self.batch_count+=1
if self.batch_count%50==0:
self.param_records.append(self.model.get_param_values())
# for every 2 epochs, we save the param_records
if self.batch_count%(50*20)==0:
record_path = './mytest/'+str(self.batch_count)+'.pkl'
print "We are now about to same lots of param records"
with log_timing(log, 'Saving param records to'+record_path):
serial.save(record_path, self.param_records)
self.param_records=[]
"""
class SOMaxout(Maxout):
"""
A SOM-Maxout layer based on Maxout
Each maxout unit is a group, and units within the same group learn
"together" by copying each other's update in an SOM-like manner.
Usually, in a maxout group, if a unit is winning/maxing all the time, the
other units in its group will never be used, never get updated, and thus get
stuck forever. This wastes maxout's capacity.
SOM-Maxout solves this problem by asking units within the same somaxout
group to be each others' buddies. The winners will help their neighbours to
learn "together". That is, if the winner gets a delta w, it will ask its
neighbours to get a SOM_factor * delta w.
decay_rate
"""
def __init__(self, *args, **kwargs):
super(SOMaxout, self).__init__(*args, **kwargs)
print "initiating mytest4"
        assert self.num_pieces==5, "This test only supports 5 pieces per group"
matrix_value = np.asarray([[ 1. , 0.8, 0.5, 0.2, 0. ],
[ 0.8, 1. , 0.8, 0.5, 0.2],
[ 0.5, 0.8, 1. , 0.8, 0.5],
[ 0.2, 0.5, 0.8, 1. , 0.8],
[ 0. , 0.2, 0.5, 0.8, 1. ]])
self.SOM_copy_matrix = sharedX(matrix_value)
self.standardize_norm = True
print "SOM_copy_matrix established for layer "+self.layer_name
print matrix_value
def modify_grads(self, grads):
"""
        W is a weight matrix of shape (n_inputs, n_units * n_pieces)
The objective of this function is to ask nearby units in the same SOM
group to learn from each other by asking them to copy each other's
grads
[1, 0.8]
[0.8, 1]
"""
W, = self.transformer.get_params()
grad_old = grads[W]
npi = self.num_pieces
# within each Maxout unit, we perform a within-group copy of grads.
# each within-group copy produces an input-size by num_pieces matrix.
grad_list= [ T.dot(grad_old[:, i*npi:(i+1)*npi ], self.SOM_copy_matrix)
for i in xrange(self.num_units)]
# we then concatenate all those matrices into an input-size by
# num_units*num_pieces matrix
grads[W] = T.concatenate(grad_list, axis=1)
print "Gradients for layer "+self.layer_name+" modified."
def _modify_updates(self, updates):
"""
        At each update, make sure all units in the same somaxout group have equal
norm
"""
W, = self.transformer.get_params()
update_old = updates[W]
npi = self.num_pieces
if self.standardize_norm:
norms = T.sqrt(T.sum(T.sqr(update_old), axis=0))
norm_mean = norms.reshape([self.num_units, self.num_pieces]).mean(axis=1)
norm_desired=T.repeat(norm_mean, npi)
if self.max_col_norm is not None:
norm_desired = T.clip(norm_desired, 0, self.max_col_norm)
updates[W] = update_old * norm_desired / norms
print "Updates for layer "+self.layer_name+" modified with within-group norm standardization"
| bsd-3-clause | 4,847,830,221,371,459,000 | 41.152632 | 105 | 0.618304 | false |
Xperia-Nicki/android_platform_sony_nicki | external/webkit/Tools/Scripts/webkitpy/common/system/ospath_unittest.py | 15 | 2518 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for ospath.py."""
import os
import unittest
from webkitpy.common.system.ospath import relpath
# Make sure the tests in this class are platform independent.
class RelPathTest(unittest.TestCase):
"""Tests relpath()."""
os_path_abspath = lambda self, path: path
def _rel_path(self, path, abs_start_path):
return relpath(path, abs_start_path, self.os_path_abspath)
def test_same_path(self):
rel_path = self._rel_path("WebKit", "WebKit")
self.assertEquals(rel_path, "")
def test_long_rel_path(self):
start_path = "WebKit"
expected_rel_path = os.path.join("test", "Foo.txt")
path = os.path.join(start_path, expected_rel_path)
rel_path = self._rel_path(path, start_path)
self.assertEquals(expected_rel_path, rel_path)
def test_none_rel_path(self):
"""Test _rel_path() with None return value."""
start_path = "WebKit"
path = os.path.join("other_dir", "foo.txt")
rel_path = self._rel_path(path, start_path)
self.assertTrue(rel_path is None)
rel_path = self._rel_path("Tools", "WebKit")
self.assertTrue(rel_path is None)
| apache-2.0 | 9,164,030,147,796,861,000 | 39.612903 | 79 | 0.70969 | false |
troya2/pjsip | doc/pjsip-book/conf.py | 61 | 8065 | # -*- coding: utf-8 -*-
#
# The PJSIP Book documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 30 06:36:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ 'breathe', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PJSUA2 Documentation'
copyright = u'2014, Teluu Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0-alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PJSUA2Doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PJSUA2Doc.tex', u'PJSUA2 Documentation',
u'Sauw Ming Liong, Benny Prijono', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pjsua2doc', u'PJSUA2 Documentation',
[u'Sauw Ming Liong', u'Benny Prijono'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PJSUA2Doc', u'PJSUA2 Documentation',
u'Sauw Ming Liong@*Benny Prijono', 'ThePJSIPBook', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
breathe_projects = {
"pjsua2": "xml/",
}
breathe_default_project = "pjsua2"
breathe_projects_source = {
"pjsua2":"../../pjsip/include/pjsua2"
}
breathe_domain_by_extension = {
"hpp":"cpp"
}
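# Illustrative use of the Breathe config above (not part of the original
# conf.py): with the "pjsua2" project registered, the reST sources can pull in
# C++ API docs through Breathe directives, e.g.
#   .. doxygenclass:: pj::Account
#      :members:
# The class name is just an example taken from the pjsua2 headers listed in
# breathe_projects_source.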
| gpl-2.0 | 6,872,362,424,761,038,000 | 30.503906 | 89 | 0.702418 | false |
ojake/django | django/contrib/gis/db/backends/postgis/models.py | 396 | 2158 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
"""
The 'geometry_columns' table from the PostGIS. See the PostGIS
documentation at Ch. 4.3.2.
On PostGIS 2, this is a view.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
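# Illustrative ORM query (assumes a configured PostGIS database; not part of
# the original module):
#   PostGISSpatialRefSys.objects.get(srid=4326).wkt  # WKT text for WGS 84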
| bsd-3-clause | 6,024,108,787,767,507,000 | 28.561644 | 79 | 0.646432 | false |
msvbhat/distaf | distaf/util.py | 1 | 4872 | # This file is part of DiSTAF
# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from types import FunctionType
from distaf.client_rpyc import BigBang
from distaf.config_parser import get_global_config, get_testcase_config
testcases = {}
test_list = {}
test_seq = []
test_mounts = {}
globl_configs = {}
global_mode = None
tc = None
def distaf_init(config_file_string="config.yml"):
"""
The distaf init function which calls the BigBang
"""
config_files = config_file_string.split()
global globl_configs, global_mode, tc
globl_configs = get_global_config(config_files)
global_mode = globl_configs['global_mode']
tc = BigBang(globl_configs)
return globl_configs
def inject_gluster_logs(label, servers=''):
"""
    Injects the label into gluster-related logs.
    This is mainly to help identify what was going
    on during the test case.
    @parameter: A label string which will be injected into gluster logs
                A list of servers on which this log injection should be
                done
@returns: None
"""
if servers == '':
servers = tc.all_nodes
cmd = "for file in `find $(gluster --print-logdir) -type f " \
"-name '*.log'`; do echo \"%s\" >> $file; done" % label
tc.run_servers(cmd, servers=servers, verbose=False)
return None
def testcase(name):
def decorator(func):
tc_config = get_testcase_config(func.__doc__)
def wrapper(self):
tc.logger.info("Starting the test: %s" % name)
voltype, mount_proto = test_seq.pop(0)
inject_gluster_logs("%s_%s" % (voltype, name))
_ret = True
globl_configs['reuse_setup'] = tc_config['reuse_setup']
globl_configs.update(tc_config)
globl_configs['voltype'] = voltype
globl_configs['mount_proto'] = mount_proto
if isinstance(func, FunctionType):
_ret = func()
else:
try:
func_obj = func(globl_configs)
ret = func_obj.setup()
if not ret:
tc.logger.error("The setup of %s failed" % name)
_ret = False
if _ret:
ret = func_obj.run()
if not ret:
tc.logger.error("The execution of testcase %s " \
"failed" % name)
_ret = False
ret = func_obj.teardown()
if not ret:
tc.logger.error("The teardown of %s failed" % name)
_ret = False
if len(test_seq) == 0 or voltype != test_seq[0][0]:
tc.logger.info("Last test case to use %s volume type" \
% voltype)
ret = func_obj.cleanup()
if not ret:
tc.logger.error("The cleanup of volume %s failed" \
% name)
_ret = False
except:
tc.logger.exception("Exception while running %s" % name)
_ret = False
self.assertTrue(_ret, "Testcase %s failed" % name)
inject_gluster_logs("%s_%s" % (voltype, name))
tc.logger.info("Ending the test: %s" % name)
return _ret
testcases[name] = wrapper
if not global_mode and tc_config is not None:
for voltype in tc_config['runs_on_volumes']:
if voltype not in test_list:
test_list[voltype] = []
if not tc_config['reuse_setup']:
test_list[voltype].insert(0, name)
else:
test_list[voltype].append(name)
test_mounts[name] = tc_config['runs_on_protocol']
return wrapper
return decorator
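# Illustrative usage of the decorator above (the test name is made up; the
# docstring of the decorated callable is parsed by get_testcase_config(),
# which supplies keys such as 'runs_on_volumes', 'runs_on_protocol' and
# 'reuse_setup' that are read in the wrapper):
#
#   @testcase("sample_volume_test")
#   def sample_volume_test():
#       """ <per-testcase config consumed by get_testcase_config> """
#       return True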
def distaf_finii():
"""
The fini() function which closes all connection to the servers
"""
tc.fini()
| gpl-2.0 | -6,207,542,107,935,917,000 | 35.631579 | 79 | 0.543514 | false |
pombredanne/psd-tools | tests/test_pixels.py | 8 | 5675 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from psd_tools import PSDImage, Layer, Group
from .utils import full_name
PIXEL_COLORS = (
# filename probe point pixel value
('1layer.psd', (5, 5), (0x27, 0xBA, 0x0F)),
('group.psd', (10, 20), (0xFF, 0xFF, 0xFF)),
('hidden-groups.psd', (60, 100), (0xE1, 0x0B, 0x0B)),
('hidden-layer.psd', (0, 0), (0xFF, 0xFF, 0xFF)),
# ('note.psd', (30, 30), (0, 0, 0)), # what is it?
('smart-object-slice.psd', (70, 80), (0xAC, 0x19, 0x19)), # XXX: what is this test about?
)
TRANSPARENCY_PIXEL_COLORS = (
('transparentbg-gimp.psd', (14, 14), (0xFF, 0xFF, 0xFF, 0x13)),
('2layers.psd', (70, 30), (0xF1, 0xF3, 0xC1)), # why gimp shows it as F2F4C2 ?
)
MASK_PIXEL_COLORS = (
('clipping-mask.psd', (182, 68), (0xDA, 0xE6, 0xF7)), # this is a clipped point
('mask.psd', (87, 7), (0xFF, 0xFF, 0xFF)), # mask truncates the layer here
)
NO_LAYERS_PIXEL_COLORS = (
('history.psd', (70, 85), (0x24, 0x26, 0x29)),
)
PIXEL_COLORS_8BIT = (PIXEL_COLORS + NO_LAYERS_PIXEL_COLORS +
MASK_PIXEL_COLORS + TRANSPARENCY_PIXEL_COLORS)
PIXEL_COLORS_32BIT = (
('32bit.psd', (75, 15), (136, 139, 145)),
('32bit.psd', (95, 15), (0, 0, 0)),
('300dpi.psd', (70, 30), (0, 0, 0)),
('300dpi.psd', (50, 60), (214, 59, 59)),
('gradient fill.psd', (10, 15), (235, 241, 250)), # background
('gradient fill.psd', (70, 50), (0, 0, 0)), # black circle
('gradient fill.psd', (50, 50), (205, 144, 110)), # filled ellipse
('pen-text.psd', (50, 50), (229, 93, 93)),
('pen-text.psd', (170, 40), (0, 0, 0)),
('vector mask.psd', (10, 15), (255, 255, 255)),
('vector mask.psd', (50, 90), (221, 227, 236)),
('transparentbg.psd', (0, 0), (255, 255, 255, 0)),
('transparentbg.psd', (50, 50), (0, 0, 0, 255)),
('32bit5x5.psd', (0, 0), (235, 241, 250)), # why not equal to 16bit5x5.psd?
('32bit5x5.psd', (4, 0), (0, 0, 0)),
('32bit5x5.psd', (1, 3), (46, 196, 104)),
)
PIXEL_COLORS_16BIT = (
('16bit5x5.psd', (0, 0), (236, 242, 251)),
('16bit5x5.psd', (4, 0), (0, 0, 0)),
('16bit5x5.psd', (1, 3), (46, 196, 104)),
)
LAYER_COLORS = (
('1layer.psd', 0, (5, 5), (0x27, 0xBA, 0x0F)),
('2layers.psd', 1, (5, 5), (0x27, 0xBA, 0x0F)),
('2layers.psd', 1, (70, 30), (0x27, 0xBA, 0x0F)),
('2layers.psd', 0, (0, 0), (0, 0, 0, 0)),
('2layers.psd', 0, (62, 26), (0xF2, 0xF4, 0xC2, 0xFE)),
)
LAYER_COLORS_MULTIBYTE = (
('16bit5x5.psd', 1, (0, 0), (236, 242, 251, 255)),
('16bit5x5.psd', 1, (1, 3), (46, 196, 104, 255)),
('32bit5x5.psd', 1, (0, 0), (235, 241, 250, 255)), # why not equal to 16bit5x5.psd?
('32bit5x5.psd', 1, (1, 3), (46, 196, 104, 255)),
)
def color_PIL(psd, point):
im = psd.as_PIL()
return im.getpixel(point)
def color_pymaging(psd, point):
im = psd.as_pymaging()
return tuple(im.get_pixel(*point))
BACKENDS = [[color_PIL], [color_pymaging]]
@pytest.mark.parametrize(["get_color"], BACKENDS)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_8BIT)
def test_composite(filename, point, color, get_color):
psd = PSDImage.load(full_name(filename))
assert color == get_color(psd, point)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_32BIT)
def test_composite_32bit(filename, point, color):
psd = PSDImage.load(full_name(filename))
assert color == color_PIL(psd, point)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_16BIT)
def test_composite_16bit(filename, point, color):
psd = PSDImage.load(full_name(filename))
assert color == color_PIL(psd, point)
@pytest.mark.parametrize(["filename", "layer_num", "point", "color"], LAYER_COLORS_MULTIBYTE)
def test_layer_colors_multibyte(filename, layer_num, point, color):
psd = PSDImage.load(full_name(filename))
layer = psd.layers[layer_num]
assert color == color_PIL(layer, point)
@pytest.mark.parametrize(["get_color"], BACKENDS)
@pytest.mark.parametrize(["filename", "layer_num", "point", "color"], LAYER_COLORS)
def test_layer_colors(filename, layer_num, point, color, get_color):
psd = PSDImage.load(full_name(filename))
layer = psd.layers[layer_num]
assert color == get_color(layer, point)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS + MASK_PIXEL_COLORS + TRANSPARENCY_PIXEL_COLORS)
def test_layer_merging_size(filename, point, color):
psd = PSDImage.load(full_name(filename))
merged_image = psd.as_PIL_merged()
assert merged_image.size == psd.as_PIL().size
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS)
def test_layer_merging_pixels(filename, point, color):
psd = PSDImage.load(full_name(filename))
merged_image = psd.as_PIL_merged()
assert color[:3] == merged_image.getpixel(point)[:3]
assert merged_image.getpixel(point)[3] == 255 # alpha channel
@pytest.mark.xfail
@pytest.mark.parametrize(["filename", "point", "color"], TRANSPARENCY_PIXEL_COLORS)
def test_layer_merging_pixels_transparency(filename, point, color):
psd = PSDImage.load(full_name(filename))
merged_image = psd.as_PIL_merged()
assert color == merged_image.getpixel(point)
| mit | 8,326,158,287,670,446,000 | 40.423358 | 118 | 0.569163 | false |
jazkarta/edx-platform-for-isc | common/djangoapps/track/tests/test_util.py | 239 | 1203 | from datetime import datetime
import json
from pytz import UTC
from django.test import TestCase
from track.utils import DateTimeJSONEncoder
class TestDateTimeJSONEncoder(TestCase):
def test_datetime_encoding(self):
a_naive_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000)
a_tz_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000, tzinfo=UTC)
a_date = a_naive_datetime.date()
an_iso_datetime = '2012-05-01T07:27:10.020000+00:00'
an_iso_date = '2012-05-01'
obj = {
'number': 100,
'string': 'hello',
'object': {'a': 1},
'a_datetime': a_naive_datetime,
'a_tz_datetime': a_tz_datetime,
'a_date': a_date,
}
to_json = json.dumps(obj, cls=DateTimeJSONEncoder)
from_json = json.loads(to_json)
self.assertEqual(from_json['number'], 100)
self.assertEqual(from_json['string'], 'hello')
self.assertEqual(from_json['object'], {'a': 1})
self.assertEqual(from_json['a_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_tz_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_date'], an_iso_date)
| agpl-3.0 | 7,489,561,403,423,701,000 | 31.513514 | 77 | 0.601829 | false |
Red-M/CloudBot-legacy | plugins/tell.py | 2 | 3615 | """ tell.py: written by sklnd in July 2009
2010.01.25 - modified by Scaevolus"""
import time
import re
from util import hook, timesince
db_ready = []
def db_init(db, conn):
"""Check that our db has the tell table, create it if not."""
global db_ready
if not conn.name in db_ready:
db.execute("create table if not exists tell"
"(user_to, user_from, message, chan, time,"
"primary key(user_to, message))")
db.commit()
db_ready.append(conn.name)
def get_tells(db, user_to):
return db.execute("select user_from, message, time, chan from tell where"
" user_to=lower(?) order by time",
(user_to.lower(),)).fetchall()
@hook.singlethread
@hook.event('PRIVMSG')
def tellinput(inp, input=None, notice=None, db=None, nick=None, conn=None):
if 'showtells' in input.msg.lower():
return
db_init(db, conn)
tells = get_tells(db, nick)
if tells:
user_from, message, time, chan = tells[0]
reltime = timesince.timesince(time)
reply = "{} sent you a message {} ago from {}: {}".format(user_from, reltime, chan,
message)
if len(tells) > 1:
reply += " (+{} more, {}showtells to view)".format(len(tells) - 1, conn.conf["command_prefix"])
db.execute("delete from tell where user_to=lower(?) and message=?",
(nick, message))
db.commit()
notice(reply)
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', notice=None, db=None, conn=None):
"""showtells -- View all pending tell messages (sent in a notice)."""
db_init(db, conn)
tells = get_tells(db, nick)
if not tells:
notice("You have no pending tells.")
return
for tell in tells:
user_from, message, time, chan = tell
past = timesince.timesince(time)
notice("{} sent you a message {} ago from {}: {}".format(user_from, past, chan, message))
db.execute("delete from tell where user_to=lower(?)",
(nick,))
db.commit()
@hook.command
def tell(inp, nick='', chan='', db=None, input=None, notice=None, conn=None):
"""tell <nick> <message> -- Relay <message> to <nick> when <nick> is around."""
query = inp.split(' ', 1)
if len(query) != 2:
notice(tell.__doc__)
return
user_to = query[0].lower()
message = query[1].strip()
user_from = nick
if chan.lower() == user_from.lower():
chan = 'a pm'
if user_to == user_from.lower():
notice("Have you looked in a mirror lately?")
return
if user_to.lower() == input.conn.nick.lower():
# user is looking for us, being a smart-ass
notice("Thanks for the message, {}!".format(user_from))
return
if not re.match("^[A-Za-z0-9_|.\-\]\[]*$", user_to.lower()):
notice("I can't send a message to that user!")
return
db_init(db, conn)
if db.execute("select count() from tell where user_to=?",
(user_to,)).fetchone()[0] >= 10:
notice("That person has too many messages queued.")
return
try:
db.execute("insert into tell(user_to, user_from, message, chan,"
"time) values(?,?,?,?,?)", (user_to, user_from, message,
chan, time.time()))
db.commit()
except db.IntegrityError:
notice("Message has already been queued.")
return
notice("Your message has been sent!")
| gpl-3.0 | -8,847,720,744,274,657,000 | 28.876033 | 107 | 0.554357 | false |
swenson/sagewiki | unidecode/unidecode/x025.py | 252 | 3871 | data = (
'-', # 0x00
'-', # 0x01
'|', # 0x02
'|', # 0x03
'-', # 0x04
'-', # 0x05
'|', # 0x06
'|', # 0x07
'-', # 0x08
'-', # 0x09
'|', # 0x0a
'|', # 0x0b
'+', # 0x0c
'+', # 0x0d
'+', # 0x0e
'+', # 0x0f
'+', # 0x10
'+', # 0x11
'+', # 0x12
'+', # 0x13
'+', # 0x14
'+', # 0x15
'+', # 0x16
'+', # 0x17
'+', # 0x18
'+', # 0x19
'+', # 0x1a
'+', # 0x1b
'+', # 0x1c
'+', # 0x1d
'+', # 0x1e
'+', # 0x1f
'+', # 0x20
'+', # 0x21
'+', # 0x22
'+', # 0x23
'+', # 0x24
'+', # 0x25
'+', # 0x26
'+', # 0x27
'+', # 0x28
'+', # 0x29
'+', # 0x2a
'+', # 0x2b
'+', # 0x2c
'+', # 0x2d
'+', # 0x2e
'+', # 0x2f
'+', # 0x30
'+', # 0x31
'+', # 0x32
'+', # 0x33
'+', # 0x34
'+', # 0x35
'+', # 0x36
'+', # 0x37
'+', # 0x38
'+', # 0x39
'+', # 0x3a
'+', # 0x3b
'+', # 0x3c
'+', # 0x3d
'+', # 0x3e
'+', # 0x3f
'+', # 0x40
'+', # 0x41
'+', # 0x42
'+', # 0x43
'+', # 0x44
'+', # 0x45
'+', # 0x46
'+', # 0x47
'+', # 0x48
'+', # 0x49
'+', # 0x4a
'+', # 0x4b
'-', # 0x4c
'-', # 0x4d
'|', # 0x4e
'|', # 0x4f
'-', # 0x50
'|', # 0x51
'+', # 0x52
'+', # 0x53
'+', # 0x54
'+', # 0x55
'+', # 0x56
'+', # 0x57
'+', # 0x58
'+', # 0x59
'+', # 0x5a
'+', # 0x5b
'+', # 0x5c
'+', # 0x5d
'+', # 0x5e
'+', # 0x5f
'+', # 0x60
'+', # 0x61
'+', # 0x62
'+', # 0x63
'+', # 0x64
'+', # 0x65
'+', # 0x66
'+', # 0x67
'+', # 0x68
'+', # 0x69
'+', # 0x6a
'+', # 0x6b
'+', # 0x6c
'+', # 0x6d
'+', # 0x6e
'+', # 0x6f
'+', # 0x70
'/', # 0x71
'\\', # 0x72
'X', # 0x73
'-', # 0x74
'|', # 0x75
'-', # 0x76
'|', # 0x77
'-', # 0x78
'|', # 0x79
'-', # 0x7a
'|', # 0x7b
'-', # 0x7c
'|', # 0x7d
'-', # 0x7e
'|', # 0x7f
'#', # 0x80
'#', # 0x81
'#', # 0x82
'#', # 0x83
'#', # 0x84
'#', # 0x85
'#', # 0x86
'#', # 0x87
'#', # 0x88
'#', # 0x89
'#', # 0x8a
'#', # 0x8b
'#', # 0x8c
'#', # 0x8d
'#', # 0x8e
'#', # 0x8f
'#', # 0x90
'#', # 0x91
'#', # 0x92
'#', # 0x93
'-', # 0x94
'|', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'#', # 0xa0
'#', # 0xa1
'#', # 0xa2
'#', # 0xa3
'#', # 0xa4
'#', # 0xa5
'#', # 0xa6
'#', # 0xa7
'#', # 0xa8
'#', # 0xa9
'#', # 0xaa
'#', # 0xab
'#', # 0xac
'#', # 0xad
'#', # 0xae
'#', # 0xaf
'#', # 0xb0
'#', # 0xb1
'^', # 0xb2
'^', # 0xb3
'^', # 0xb4
'^', # 0xb5
'>', # 0xb6
'>', # 0xb7
'>', # 0xb8
'>', # 0xb9
'>', # 0xba
'>', # 0xbb
'V', # 0xbc
'V', # 0xbd
'V', # 0xbe
'V', # 0xbf
'<', # 0xc0
'<', # 0xc1
'<', # 0xc2
'<', # 0xc3
'<', # 0xc4
'<', # 0xc5
'*', # 0xc6
'*', # 0xc7
'*', # 0xc8
'*', # 0xc9
'*', # 0xca
'*', # 0xcb
'*', # 0xcc
'*', # 0xcd
'*', # 0xce
'*', # 0xcf
'*', # 0xd0
'*', # 0xd1
'*', # 0xd2
'*', # 0xd3
'*', # 0xd4
'*', # 0xd5
'*', # 0xd6
'*', # 0xd7
'*', # 0xd8
'*', # 0xd9
'*', # 0xda
'*', # 0xdb
'*', # 0xdc
'*', # 0xdd
'*', # 0xde
'*', # 0xdf
'*', # 0xe0
'*', # 0xe1
'*', # 0xe2
'*', # 0xe3
'*', # 0xe4
'*', # 0xe5
'*', # 0xe6
'#', # 0xe7
'#', # 0xe8
'#', # 0xe9
'#', # 0xea
'#', # 0xeb
'^', # 0xec
'^', # 0xed
'^', # 0xee
'O', # 0xef
'#', # 0xf0
'#', # 0xf1
'#', # 0xf2
'#', # 0xf3
'#', # 0xf4
'#', # 0xf5
'#', # 0xf6
'#', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
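# Each position in this tuple is the low byte of a code point in the U+25xx
# range (box drawing, block elements, geometric shapes); e.g.
# unidecode(u'\u2500'), BOX DRAWINGS LIGHT HORIZONTAL, falls back to
# data[0x00] == '-'.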
| gpl-2.0 | 4,723,810,819,600,030,000 | 14.062257 | 16 | 0.266081 | false |
jakub-d/kubernetes | hack/lookup_pull.py | 246 | 1299 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to print out PR info in release note format.
import json
import sys
import urllib2
PULLQUERY=("https://api.github.com/repos/"
"GoogleCloudPlatform/kubernetes/pulls/{pull}")
LOGIN="login"
TITLE="title"
USER="user"
def print_pulls(pulls):
for pull in pulls:
d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())
print "* {title} #{pull} ({author})".format(
title=d[TITLE], pull=pull, author=d[USER][LOGIN])
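# Example invocation (PR numbers are made up):
#   ./lookup_pull.py 1234 5678
# prints one bullet per PR, e.g.:
#   * Fix kubelet flag parsing #1234 (someuser)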
if __name__ == "__main__":
if len(sys.argv) < 2:
print ("Usage: {cmd} <pulls>...: Prints out short " +
"markdown description for PRs appropriate for release notes.")
sys.exit(1)
print_pulls(sys.argv[1:])
| apache-2.0 | 1,747,868,749,868,640,800 | 31.475 | 74 | 0.700539 | false |
snowballstem/snowball | python/stemwords.py | 1 | 3437 | import sys
import codecs
import snowballstemmer
def usage():
print('''usage: %s [-l <language>] [-i <input file>] [-o <output file>] [-c <character encoding>] [-p[2]] [-h]
The input file consists of a list of words to be stemmed, one per
line. Words should be in lower case, but (for English) A-Z letters
are mapped to their a-z equivalents anyway. If omitted, stdin is
used.
If -c is given, the argument is the character encoding of the input
and output files. If it is omitted, the UTF-8 encoding is used.
If -p is given the output file consists of each word of the input
file followed by \"->\" followed by its stemmed equivalent.
If -p2 is given the output file is a two column layout containing
the input words in the first column and the stemmed eqivalents in
the second column.
Otherwise, the output file consists of the stemmed words, one per
line.
-h displays this help''' % sys.argv[0])
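# Example invocation (file names are made up):
#   python stemwords.py -l english -i vocabulary.txt -o stemmed.txt
# With -p each output line becomes "<word> -> <stem>" instead of the stem only.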
def main():
argv = sys.argv[1:]
if len(argv) < 5:
usage()
else:
pretty = 0
input = ''
output = ''
encoding = 'utf_8'
language = 'English'
show_help = False
while len(argv):
arg = argv[0]
argv = argv[1:]
if arg == '-h':
show_help = True
break
elif arg == "-p":
pretty = 1
elif arg == "-p2":
pretty = 2
elif arg == "-l":
if len(argv) == 0:
show_help = True
break
language = argv[0]
argv = argv[1:]
elif arg == "-i":
if len(argv) == 0:
show_help = True
break
input = argv[0]
argv = argv[1:]
elif arg == "-o":
if len(argv) == 0:
show_help = True
break
output = argv[0]
argv = argv[1:]
elif arg == "-c":
if len(argv) == 0:
show_help = True
break
encoding = argv[0]
if show_help or input == '' or output == '':
usage()
else:
stemming(language, input, output, encoding, pretty)
def stemming(lang, input, output, encoding, pretty):
stemmer = snowballstemmer.stemmer(lang)
with codecs.open(output, "w", encoding) as outfile:
with codecs.open(input, "r", encoding) as infile:
for original in infile.readlines():
original = original.strip()
# Convert only ASCII-letters to lowercase, to match C behavior
original = ''.join((c.lower() if 'A' <= c <= 'Z' else c for c in original))
stemmed = stemmer.stemWord(original)
if pretty == 0:
if stemmed != "":
outfile.write(stemmed)
elif pretty == 1:
outfile.write(original, " -> ", stemmed)
elif pretty == 2:
outfile.write(original)
if len(original) < 30:
outfile.write(" " * (30 - len(original)))
else:
outfile.write("\n")
outfile.write(" " * 30)
outfile.write(stemmed)
outfile.write('\n')
main()
| bsd-3-clause | 8,974,131,121,116,061,000 | 33.029703 | 114 | 0.484725 | false |
markYoungH/chromium.src | media/tools/constrained_network_server/traffic_control.py | 186 | 12569 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Traffic control library for constraining the network configuration on a port.
The traffic controller sets up a constrained network configuration on a port.
Traffic to the constrained port is forwarded to a specified server port.
"""
import logging
import os
import re
import subprocess
# The maximum bandwidth limit.
_DEFAULT_MAX_BANDWIDTH_KBIT = 1000000
class TrafficControlError(BaseException):
"""Exception raised for errors in traffic control library.
Attributes:
msg: User defined error message.
cmd: Command for which the exception was raised.
returncode: Return code of running the command.
stdout: Output of running the command.
stderr: Error output of running the command.
"""
def __init__(self, msg, cmd=None, returncode=None, output=None,
error=None):
BaseException.__init__(self, msg)
self.msg = msg
self.cmd = cmd
self.returncode = returncode
self.output = output
self.error = error
def CheckRequirements():
"""Checks if permissions are available to run traffic control commands.
Raises:
TrafficControlError: If permissions to run traffic control commands are not
available.
"""
if os.geteuid() != 0:
_Exec(['sudo', '-n', 'tc', '-help'],
msg=('Cannot run \'tc\' command. Traffic Control must be run as root '
'or have password-less sudo access to this command.'))
_Exec(['sudo', '-n', 'iptables', '-help'],
msg=('Cannot run \'iptables\' command. Traffic Control must be run '
'as root or have password-less sudo access to this command.'))
def CreateConstrainedPort(config):
"""Creates a new constrained port.
Imposes packet level constraints such as bandwidth, latency, and packet loss
on a given port using the specified configuration dictionary. Traffic to that
port is forwarded to a specified server port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
loss: Percentage of packets to drop (integer 0-100).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
_AddRootQdisc(config['interface'])
try:
_ConfigureClass('add', config)
_AddSubQdisc(config)
_AddFilter(config['interface'], config['port'])
_AddIptableRule(config['interface'], config['port'], config['server_port'])
except TrafficControlError as e:
logging.debug('Error creating constrained port %d.\nError: %s\n'
'Deleting constrained port.', config['port'], e.error)
DeleteConstrainedPort(config)
raise e
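# Illustrative call sequence (values are made up; requires password-less sudo
# access to 'tc' and 'iptables', see CheckRequirements()):
#   config = {'interface': 'eth0', 'port': 8081, 'server_port': 8080,
#             'bandwidth': 256, 'latency': 100, 'loss': 2}
#   CheckRequirements()
#   CreateConstrainedPort(config)
#   ...  # run traffic against the constrained port
#   DeleteConstrainedPort(config)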
def DeleteConstrainedPort(config):
"""Deletes an existing constrained port.
Deletes constraints set on a given port and the traffic forwarding rule from
the constrained port to a specified server port.
The original constrained network configuration used to create the constrained
port must be passed in.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
try:
# Delete filters first so it frees the class.
_DeleteFilter(config['interface'], config['port'])
finally:
try:
# Deleting the class deletes attached qdisc as well.
_ConfigureClass('del', config)
finally:
_DeleteIptableRule(config['interface'], config['port'],
config['server_port'])
def TearDown(config):
"""Deletes the root qdisc and all iptables rules.
Args:
config: Constraint configuration dictionary, format:
interface: Network interface name (string).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface')
command = ['sudo', 'tc', 'qdisc', 'del', 'dev', config['interface'], 'root']
try:
_Exec(command, msg='Could not delete root qdisc.')
finally:
_DeleteAllIpTableRules()
def _CheckArgsExist(config, *args):
"""Check that the args exist in config dictionary and are not None.
Args:
config: Any dictionary.
*args: The list of key names to check.
Raises:
TrafficControlError: If any key name does not exist in config or is None.
"""
for key in args:
if key not in config.keys() or config[key] is None:
raise TrafficControlError('Missing "%s" parameter.' % key)
def _AddRootQdisc(interface):
"""Sets up the default root qdisc.
Args:
interface: Network interface name.
Raises:
TrafficControlError: If adding the root qdisc fails for a reason other than
it already exists.
"""
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle',
'1:', 'htb']
try:
_Exec(command, msg=('Error creating root qdisc. '
'Make sure you have root access'))
except TrafficControlError as e:
# Ignore the error if root already exists.
if not 'File exists' in e.error:
raise e
def _ConfigureClass(option, config):
"""Adds or deletes a class and qdisc attached to the root.
The class specifies bandwidth, and qdisc specifies delay and packet loss. The
class ID is based on the config port.
Args:
option: Adds or deletes a class option [add|del].
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
"""
# Use constrained port as class ID so we can attach the qdisc and filter to
# it, as well as delete the class, using only the port number.
class_id = '1:%x' % config['port']
if 'bandwidth' not in config.keys() or not config['bandwidth']:
bandwidth = _DEFAULT_MAX_BANDWIDTH_KBIT
else:
bandwidth = config['bandwidth']
bandwidth = '%dkbit' % bandwidth
command = ['sudo', 'tc', 'class', option, 'dev', config['interface'],
'parent', '1:', 'classid', class_id, 'htb', 'rate', bandwidth,
'ceil', bandwidth]
_Exec(command, msg=('Error configuring class ID %s using "%s" command.' %
(class_id, option)))
def _AddSubQdisc(config):
"""Adds a qdisc attached to the class identified by the config port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
loss: Percentage of packets to drop (integer 0-100).
"""
port_hex = '%x' % config['port']
class_id = '1:%x' % config['port']
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', config['interface'], 'parent',
class_id, 'handle', port_hex + ':0', 'netem']
# Check if packet-loss is set in the configuration.
if 'loss' in config.keys() and config['loss']:
loss = '%d%%' % config['loss']
command.extend(['loss', loss])
# Check if latency is set in the configuration.
if 'latency' in config.keys() and config['latency']:
latency = '%dms' % config['latency']
command.extend(['delay', latency])
_Exec(command, msg='Could not attach qdisc to class ID %s.' % class_id)
def _AddFilter(interface, port):
"""Redirects packets coming to a specified port into the constrained class.
Args:
interface: Interface name to attach the filter to (string).
port: Port number to filter packets with (integer 1-65535).
"""
class_id = '1:%x' % port
command = ['sudo', 'tc', 'filter', 'add', 'dev', interface, 'protocol', 'ip',
'parent', '1:', 'prio', '1', 'u32', 'match', 'ip', 'sport', port,
'0xffff', 'flowid', class_id]
_Exec(command, msg='Error adding filter on port %d.' % port)
def _DeleteFilter(interface, port):
"""Deletes the filter attached to the configured port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
"""
handle_id = _GetFilterHandleId(interface, port)
command = ['sudo', 'tc', 'filter', 'del', 'dev', interface, 'protocol', 'ip',
'parent', '1:0', 'handle', handle_id, 'prio', '1', 'u32']
_Exec(command, msg='Error deleting filter on port %d.' % port)
def _GetFilterHandleId(interface, port):
"""Searches for the handle ID of the filter identified by the config port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
Returns:
The handle ID.
Raises:
TrafficControlError: If handle ID was not found.
"""
command = ['sudo', 'tc', 'filter', 'list', 'dev', interface, 'parent', '1:']
output = _Exec(command, msg='Error listing filters.')
# Search for the filter handle ID associated with class ID '1:port'.
handle_id_re = re.search(
'([0-9a-fA-F]{3}::[0-9a-fA-F]{3}).*(?=flowid 1:%x\s)' % port, output)
if handle_id_re:
return handle_id_re.group(1)
raise TrafficControlError(('Could not find filter handle ID for class ID '
'1:%x.') % port)
def _AddIptableRule(interface, port, server_port):
"""Forwards traffic from constrained port to a specified server port.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port to forward the packets to (integer 1-65535).
"""
# Preroute rules for accessing the port through external connections.
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
# Output rules for accessing the rule through localhost or 127.0.0.1
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteIptableRule(interface, port, server_port):
"""Deletes the iptable rule associated with specified port number.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port packets are forwarded to (integer 1-65535).
"""
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error deleting iptables rule for port %d.' % port)
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteAllIpTableRules():
"""Deletes all iptables rules."""
command = ['sudo', 'iptables', '-t', 'nat', '-F']
_Exec(command, msg='Error deleting all iptables rules.')
def _Exec(command, msg=None):
"""Executes a command.
Args:
command: Command list to execute.
msg: Message describing the error in case the command fails.
Returns:
The standard output from running the command.
Raises:
TrafficControlError: If command fails. Message is set by the msg parameter.
"""
cmd_list = [str(x) for x in command]
cmd = ' '.join(cmd_list)
logging.debug('Running command: %s', cmd)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
raise TrafficControlError(msg, cmd, p.returncode, output, error)
return output.strip()
| bsd-3-clause | -7,661,146,222,376,202,000 | 34.50565 | 80 | 0.662185 | false |
marcelocure/django | tests/gis_tests/utils.py | 327 | 1377 | from unittest import skip
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
def no_backend(test_func, backend):
"Use this decorator to disable test on specified backend."
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend:
@skip("This test is skipped on '%s' backend" % backend)
def inner():
pass
return inner
else:
return test_func
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
return no_backend(func, 'oracle')
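# Illustrative use (the decorated test is skipped when the default database
# backend is Oracle):
#   @no_oracle
#   def test_extent(self):
#       ...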
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
# MySQL spatial indices can't handle NULL geometries.
gisfield_may_be_null = not mysql
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys as SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import PostGISSpatialRefSys as SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys as SpatialRefSys
else:
SpatialRefSys = None
| bsd-3-clause | -8,935,055,788,941,275,000 | 32.585366 | 105 | 0.726943 | false |
chromium/chromium | components/policy/tools/generate_policy_source.py | 1 | 66390 | #!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''python3 %(prog)s [options]
Pass at least:
--chrome-version-file <path to src/chrome/VERSION> or --all-chrome-versions
--target-platform <which platform the target code will be generated for and can
be one of (win, mac, linux, chromeos, ios)>
--policy-templates-file <path to the policy_templates.json input file>.'''
from argparse import ArgumentParser
from collections import namedtuple
from collections import OrderedDict
from functools import partial
import ast
import codecs
import json
import os
import re
import sys
import textwrap
sys.path.insert(
0,
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
'third_party', 'six', 'src'))
import six
from xml.sax.saxutils import escape as xml_escape
if sys.version_info.major == 2:
string_type = basestring
else:
string_type = str
CHROME_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Google\\\\Chrome'
CHROMIUM_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Chromium'
PLATFORM_STRINGS = {
'chrome_frame': ['win'],
'chrome_os': ['chrome_os'],
'android': ['android'],
'webview_android': ['android'],
'ios': ['ios'],
'chrome.win': ['win'],
'chrome.linux': ['linux'],
'chrome.mac': ['mac'],
'chrome.*': ['win', 'mac', 'linux'],
'chrome.win7': ['win']
}
class PolicyDetails:
"""Parses a policy template and caches all its details."""
# Maps policy types to a tuple with 4 other types:
# - the equivalent base::Value::Type or 'TYPE_EXTERNAL' if the policy
# references external data
# - the equivalent Protobuf field type
# - the name of one of the protobufs for shared policy types
# - the equivalent type in Android's App Restriction Schema
# TODO(joaodasilva): refactor the 'dict' type into a more generic 'json' type
# that can also be used to represent lists of other JSON objects.
TYPE_MAP = {
'dict': ('Type::DICTIONARY', 'string', 'String', 'string'),
'external': ('TYPE_EXTERNAL', 'string', 'String', 'invalid'),
'int': ('Type::INTEGER', 'int64', 'Integer', 'integer'),
'int-enum': ('Type::INTEGER', 'int64', 'Integer', 'choice'),
'list': ('Type::LIST', 'StringList', 'StringList', 'string'),
'main': ('Type::BOOLEAN', 'bool', 'Boolean', 'bool'),
'string': ('Type::STRING', 'string', 'String', 'string'),
'string-enum': ('Type::STRING', 'string', 'String', 'choice'),
'string-enum-list': ('Type::LIST', 'StringList', 'StringList',
'multi-select'),
}
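  # Illustrative policy_templates.json entry consumed by this class (field
  # values are made up):
  #   { "name": "ExamplePolicy", "id": 123, "type": "main",
  #     "schema": { "type": "boolean" }, "tags": [],
  #     "supported_on": ["chrome.*:80-", "chrome_os:80-"],
  #     "features": { "can_be_recommended": true, "per_profile": true } }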
class EnumItem:
def __init__(self, item):
self.caption = PolicyDetails._RemovePlaceholders(item['caption'])
self.value = item['value']
def _ConvertPlatform(self, platform):
'''Converts product platform string in policy_templates.json to platform
string that is defined in build config.'''
if platform not in PLATFORM_STRINGS:
raise RuntimeError('Platform "%s" is not supported' % platform)
return PLATFORM_STRINGS[platform]
def __init__(self, policy, chrome_major_version, target_platform, valid_tags):
self.id = policy['id']
self.name = policy['name']
self.tags = policy.get('tags', None)
self._CheckTagsValidity(valid_tags)
features = policy.get('features', {})
self.can_be_recommended = features.get('can_be_recommended', False)
self.can_be_mandatory = features.get('can_be_mandatory', True)
self.internal_only = features.get('internal_only', False)
self.is_deprecated = policy.get('deprecated', False)
self.is_device_only = policy.get('device_only', False)
self.is_future = policy.get('future', False)
self.per_profile = features.get('per_profile', False)
self.supported_chrome_os_management = policy.get(
'supported_chrome_os_management', ['active_directory', 'google_cloud'])
self.schema = policy['schema']
self.validation_schema = policy.get('validation_schema')
self.has_enterprise_default = 'default_for_enterprise_users' in policy
if self.has_enterprise_default:
self.enterprise_default = policy['default_for_enterprise_users']
self.platforms = set()
self.future_on = set()
for platform, version_range in map(lambda s: s.split(':'),
policy.get('supported_on', [])):
split_result = version_range.split('-')
if len(split_result) != 2:
        raise RuntimeError('supported_on must have exactly one dash: "%s"' %
                           version_range)
(version_min, version_max) = split_result
if version_min == '':
        raise RuntimeError('supported_on must define a start version: "%s"' %
                           version_range)
# Skip if filtering by Chromium version and the current Chromium version
# does not support the policy.
if chrome_major_version:
if (int(version_min) > chrome_major_version or
version_max != '' and int(version_max) < chrome_major_version):
continue
self.platforms.update(self._ConvertPlatform(platform))
for platform in policy.get('future_on', []):
self.future_on.update(self._ConvertPlatform(platform))
if self.is_device_only and self.platforms.union(self.future_on) > set(
['chrome_os']):
raise RuntimeError('device_only is only allowed for Chrome OS: "%s"' %
self.name)
self.is_supported = (target_platform in self.platforms
or target_platform in self.future_on)
self.is_future_on = target_platform in self.future_on
self.is_future = self.is_future or self.is_future_on
if policy['type'] not in PolicyDetails.TYPE_MAP:
raise NotImplementedError(
'Unknown policy type for %s: %s' % (policy['name'], policy['type']))
self.policy_type, self.protobuf_type, self.policy_protobuf_type, \
self.restriction_type = PolicyDetails.TYPE_MAP[policy['type']]
self.desc = '\n'.join(
map(str.strip,
PolicyDetails._RemovePlaceholders(policy['desc']).splitlines()))
self.caption = PolicyDetails._RemovePlaceholders(policy['caption'])
self.max_size = policy.get('max_size', 0)
items = policy.get('items')
if items is None:
self.items = None
else:
self.items = [PolicyDetails.EnumItem(entry) for entry in items]
PH_PATTERN = re.compile('<ph[^>]*>([^<]*|[^<]*<ex>([^<]*)</ex>[^<]*)</ph>')
def _CheckTagsValidity(self, valid_tags):
if self.tags == None:
raise RuntimeError('Policy ' + self.name + ' has to contain a list of '
'tags!\n An empty list is also valid but means '
'setting this policy can never harm the user\'s '
'privacy or security.\n')
for tag in self.tags:
if not tag in valid_tags:
raise RuntimeError('Invalid Tag:' + tag + '!\n'
'Chose a valid tag from \'risk_tag_definitions\' (a '
'subproperty of root in policy_templates.json)!')
# Simplistic grit placeholder stripper.
@staticmethod
def _RemovePlaceholders(text):
result = ''
pos = 0
for m in PolicyDetails.PH_PATTERN.finditer(text):
result += text[pos:m.start(0)]
result += m.group(2) or m.group(1)
pos = m.end(0)
result += text[pos:]
return result
class PolicyAtomicGroup:
"""Parses a policy atomic group and caches its name and policy names"""
def __init__(self, policy_group, available_policies,
policies_already_in_group):
self.id = policy_group['id']
self.name = policy_group['name']
self.policies = policy_group.get('policies', None)
self._CheckPoliciesValidity(available_policies, policies_already_in_group)
def _CheckPoliciesValidity(self, available_policies,
policies_already_in_group):
if self.policies == None or len(self.policies) <= 0:
raise RuntimeError('Atomic policy group ' + self.name +
' has to contain a list of '
'policies!\n')
for policy in self.policies:
if policy in policies_already_in_group:
raise RuntimeError('Policy: ' + policy +
' cannot be in more than one atomic group '
'in policy_templates.json)!')
policies_already_in_group.add(policy)
if not policy in available_policies:
raise RuntimeError('Invalid policy: ' + policy + ' in atomic group ' +
self.name + '.\n')
def ParseVersionFile(version_path):
chrome_major_version = None
for line in open(version_path, 'r').readlines():
key, val = line.rstrip('\r\n').split('=', 1)
if key == 'MAJOR':
chrome_major_version = val
break
if chrome_major_version is None:
raise RuntimeError('VERSION file does not contain major version.')
return int(chrome_major_version)
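# src/chrome/VERSION is a simple key=value file, for example:
#   MAJOR=96
#   MINOR=0
#   BUILD=4664
#   PATCH=45
# Only MAJOR is read here.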
def main():
parser = ArgumentParser(usage=__doc__)
parser.add_argument(
'--pch',
'--policy-constants-header',
dest='header_path',
help='generate header file of policy constants',
metavar='FILE')
parser.add_argument(
'--pcc',
'--policy-constants-source',
dest='source_path',
help='generate source file of policy constants',
metavar='FILE')
parser.add_argument(
'--cpp',
'--cloud-policy-protobuf',
dest='cloud_policy_proto_path',
help='generate cloud policy protobuf file',
metavar='FILE')
parser.add_argument(
'--cpfrp',
'--cloud-policy-full-runtime-protobuf',
dest='cloud_policy_full_runtime_proto_path',
help='generate cloud policy full runtime protobuf',
metavar='FILE')
parser.add_argument(
'--csp',
'--chrome-settings-protobuf',
dest='chrome_settings_proto_path',
help='generate chrome settings protobuf file',
metavar='FILE')
parser.add_argument(
'--policy-common-definitions-protobuf',
dest='policy_common_definitions_proto_path',
help='policy common definitions protobuf file path',
metavar='FILE')
parser.add_argument(
'--policy-common-definitions-full-runtime-protobuf',
dest='policy_common_definitions_full_runtime_proto_path',
help='generate policy common definitions full runtime protobuf file',
metavar='FILE')
parser.add_argument(
'--csfrp',
'--chrome-settings-full-runtime-protobuf',
dest='chrome_settings_full_runtime_proto_path',
help='generate chrome settings full runtime protobuf',
metavar='FILE')
parser.add_argument(
'--ard',
'--app-restrictions-definition',
dest='app_restrictions_path',
help='generate an XML file as specified by '
'Android\'s App Restriction Schema',
metavar='FILE')
parser.add_argument(
'--rth',
'--risk-tag-header',
dest='risk_header_path',
help='generate header file for policy risk tags',
metavar='FILE')
parser.add_argument(
'--crospch',
'--cros-policy-constants-header',
dest='cros_constants_header_path',
help='generate header file of policy constants for use in '
'Chrome OS',
metavar='FILE')
parser.add_argument(
'--crospcc',
'--cros-policy-constants-source',
dest='cros_constants_source_path',
help='generate source file of policy constants for use in '
'Chrome OS',
metavar='FILE')
parser.add_argument(
'--chrome-version-file',
dest='chrome_version_file',
help='path to src/chrome/VERSION',
metavar='FILE')
parser.add_argument(
'--all-chrome-versions',
action='store_true',
dest='all_chrome_versions',
default=False,
help='do not restrict generated policies by chrome version')
parser.add_argument(
'--target-platform',
dest='target_platform',
      help='the platform the generated code should run on - can be one of '
'(win, mac, linux, chromeos, fuchsia)',
metavar='PLATFORM')
parser.add_argument(
'--policy-templates-file',
dest='policy_templates_file',
help='path to the policy_templates.json input file',
metavar='FILE')
args = parser.parse_args()
has_arg_error = False
if not args.target_platform:
print('Error: Missing --target-platform=<platform>')
has_arg_error = True
if not args.policy_templates_file:
print('Error: Missing'
' --policy-templates-file=<path to policy_templates.json>')
has_arg_error = True
if not args.chrome_version_file and not args.all_chrome_versions:
print('Error: Missing'
' --chrome-version-file=<path to src/chrome/VERSION>\n'
' or --all-chrome-versions')
has_arg_error = True
if has_arg_error:
print('')
parser.print_help()
return 2
version_path = args.chrome_version_file
target_platform = args.target_platform
template_file_name = args.policy_templates_file
# --target-platform accepts "chromeos" as its input because that's what is
# used within GN. Within policy templates, "chrome_os" is used instead.
if target_platform == 'chromeos':
target_platform = 'chrome_os'
if args.all_chrome_versions:
chrome_major_version = None
else:
chrome_major_version = ParseVersionFile(version_path)
template_file_contents = _LoadJSONFile(template_file_name)
risk_tags = RiskTags(template_file_contents)
policy_details = [
PolicyDetails(policy, chrome_major_version, target_platform,
risk_tags.GetValidTags())
for policy in template_file_contents['policy_definitions']
if policy['type'] != 'group'
]
risk_tags.ComputeMaxTags(policy_details)
sorted_policy_details = sorted(policy_details, key=lambda policy: policy.name)
policy_details_set = list(map((lambda x: x.name), policy_details))
policies_already_in_group = set()
policy_atomic_groups = [
PolicyAtomicGroup(group, policy_details_set, policies_already_in_group)
for group in template_file_contents['policy_atomic_group_definitions']
]
sorted_policy_atomic_groups = sorted(
policy_atomic_groups, key=lambda group: group.name)
def GenerateFile(path, writer, sorted=False, xml=False):
if path:
with codecs.open(path, 'w', encoding='utf-8') as f:
_OutputGeneratedWarningHeader(f, template_file_name, xml)
writer(sorted and sorted_policy_details or policy_details,
sorted and sorted_policy_atomic_groups or policy_atomic_groups,
target_platform, f, risk_tags)
if args.header_path:
GenerateFile(args.header_path, _WritePolicyConstantHeader, sorted=True)
if args.source_path:
GenerateFile(args.source_path, _WritePolicyConstantSource, sorted=True)
if args.risk_header_path:
GenerateFile(args.risk_header_path, _WritePolicyRiskTagHeader)
if args.cloud_policy_proto_path:
GenerateFile(args.cloud_policy_proto_path, _WriteCloudPolicyProtobuf)
if (args.policy_common_definitions_full_runtime_proto_path and
args.policy_common_definitions_proto_path):
GenerateFile(
args.policy_common_definitions_full_runtime_proto_path,
partial(_WritePolicyCommonDefinitionsFullRuntimeProtobuf,
args.policy_common_definitions_proto_path))
if args.cloud_policy_full_runtime_proto_path:
GenerateFile(args.cloud_policy_full_runtime_proto_path,
_WriteCloudPolicyFullRuntimeProtobuf)
if args.chrome_settings_proto_path:
GenerateFile(args.chrome_settings_proto_path, _WriteChromeSettingsProtobuf)
if args.chrome_settings_full_runtime_proto_path:
GenerateFile(args.chrome_settings_full_runtime_proto_path,
_WriteChromeSettingsFullRuntimeProtobuf)
if target_platform == 'android' and args.app_restrictions_path:
GenerateFile(args.app_restrictions_path, _WriteAppRestrictions, xml=True)
# Generated code for Chrome OS (unused in Chromium).
if args.cros_constants_header_path:
GenerateFile(
args.cros_constants_header_path,
_WriteChromeOSPolicyConstantsHeader,
sorted=True)
if args.cros_constants_source_path:
GenerateFile(
args.cros_constants_source_path,
_WriteChromeOSPolicyConstantsSource,
sorted=True)
return 0
#------------------ shared helpers ---------------------------------#
def _OutputGeneratedWarningHeader(f, template_file_path, xml_style):
left_margin = '//'
if xml_style:
left_margin = ' '
f.write('<?xml version="1.0" encoding="utf-8"?>\n' '<!--\n')
else:
f.write('//\n')
f.write(left_margin + ' DO NOT MODIFY THIS FILE DIRECTLY!\n')
f.write(left_margin + ' IT IS GENERATED BY generate_policy_source.py\n')
f.write(left_margin + ' FROM ' + template_file_path + '\n')
if xml_style:
f.write('-->\n\n')
else:
f.write(left_margin + '\n\n')
COMMENT_WRAPPER = textwrap.TextWrapper()
COMMENT_WRAPPER.width = 80
COMMENT_WRAPPER.initial_indent = '// '
COMMENT_WRAPPER.subsequent_indent = '// '
COMMENT_WRAPPER.replace_whitespace = False
# Writes a comment, each line prefixed by // and wrapped to 80 spaces.
def _OutputComment(f, comment):
for line in six.ensure_text(comment).splitlines():
if len(line) == 0:
f.write('//')
else:
f.write(COMMENT_WRAPPER.fill(line))
f.write('\n')
def _LoadJSONFile(json_file):
with codecs.open(json_file, 'r', encoding='utf-8') as f:
text = f.read()
return ast.literal_eval(text)
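# Illustrative input for _LoadJSONFile (assumed shape, not an exhaustive schema):
#   {'policy_definitions': [], 'policy_atomic_group_definitions': [],
#    'risk_tag_definitions': []}
# Note that the file is parsed with ast.literal_eval() rather than the json
# module, so Python-literal syntax is accepted.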
#------------------ policy constants header ------------------------#
def _WritePolicyConstantHeader(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write('''#ifndef COMPONENTS_POLICY_POLICY_CONSTANTS_H_
#define COMPONENTS_POLICY_POLICY_CONSTANTS_H_
#include <cstdint>
#include <string>
#include "components/policy/core/common/policy_details.h"
#include "components/policy/core/common/policy_map.h"
#include "components/policy/proto/cloud_policy.pb.h"
namespace policy {
namespace internal {
struct SchemaData;
}
''')
if target_platform == 'win':
f.write('// The windows registry path where Chrome policy '
'configuration resides.\n'
'extern const wchar_t kRegistryChromePolicyKey[];\n')
f.write('''#if defined(OS_CHROMEOS)
// Sets default profile policies values for enterprise users.
void SetEnterpriseUsersProfileDefaults(PolicyMap* policy_map);
// Sets default system-wide policies values for enterprise users.
void SetEnterpriseUsersSystemWideDefaults(PolicyMap* policy_map);
// Sets all default values for enterprise users.
void SetEnterpriseUsersDefaults(PolicyMap* policy_map);
#endif
// Returns the PolicyDetails for |policy| if |policy| is a known
// Chrome policy, otherwise returns nullptr.
const PolicyDetails* GetChromePolicyDetails(
const std::string& policy);
// Returns the schema data of the Chrome policy schema.
const internal::SchemaData* GetChromeSchemaData();
''')
f.write('// Key names for the policy settings.\n' 'namespace key {\n\n')
for policy in policies:
# TODO(joaodasilva): Include only supported policies in
# configuration_policy_handler.cc and configuration_policy_handler_list.cc
# so that these names can be conditional on 'policy.is_supported'.
# http://crbug.com/223616
f.write('extern const char k' + policy.name + '[];\n')
f.write('\n} // namespace key\n\n')
f.write('// Group names for the policy settings.\n' 'namespace group {\n\n')
for group in policy_atomic_groups:
f.write('extern const char k' + group.name + '[];\n')
f.write('\n} // namespace group\n\n')
f.write('struct AtomicGroup {\n'
' const short id;\n'
' const char* policy_group;\n'
' const char* const* policies;\n'
'};\n\n')
f.write('extern const AtomicGroup kPolicyAtomicGroupMappings[];\n\n')
f.write('extern const size_t kPolicyAtomicGroupMappingsLength;\n\n')
f.write('enum class StringPolicyType {\n'
' STRING,\n'
' JSON,\n'
' EXTERNAL,\n'
'};\n\n')
# User policy proto pointers, one struct for each protobuf type.
protobuf_types = _GetProtobufTypes(policies)
for protobuf_type in protobuf_types:
_WriteChromePolicyAccessHeader(f, protobuf_type)
f.write('constexpr int64_t kDevicePolicyExternalDataResourceCacheSize = %d;\n'
% _ComputeTotalDevicePolicyExternalDataMaxSize(policies))
f.write('\n} // namespace policy\n\n'
'#endif // COMPONENTS_POLICY_POLICY_CONSTANTS_H_\n')
def _WriteChromePolicyAccessHeader(f, protobuf_type):
f.write('// Read access to the protobufs of all supported %s user policies.\n'
% protobuf_type.lower())
f.write('struct %sPolicyAccess {\n' % protobuf_type)
f.write(' const char* policy_key;\n'
' bool per_profile;\n'
' bool (enterprise_management::CloudPolicySettings::'
'*has_proto)() const;\n'
' const enterprise_management::%sPolicyProto&\n'
' (enterprise_management::CloudPolicySettings::'
'*get_proto)() const;\n' % protobuf_type)
if protobuf_type == 'String':
f.write(' const StringPolicyType type;\n')
f.write('};\n')
f.write('extern const %sPolicyAccess k%sPolicyAccess[];\n\n' %
(protobuf_type, protobuf_type))
def _ComputeTotalDevicePolicyExternalDataMaxSize(policies):
total_device_policy_external_data_max_size = 0
for policy in policies:
if policy.is_device_only and policy.policy_type == 'TYPE_EXTERNAL':
total_device_policy_external_data_max_size += policy.max_size
return total_device_policy_external_data_max_size
#------------------ policy constants source ------------------------#
SchemaNodeKey = namedtuple('SchemaNodeKey',
'schema_type extra is_sensitive_value')
SchemaNode = namedtuple(
'SchemaNode',
'schema_type extra is_sensitive_value has_sensitive_children comments')
PropertyNode = namedtuple('PropertyNode', 'key schema')
PropertiesNode = namedtuple(
'PropertiesNode',
'begin end pattern_end required_begin required_end additional name')
RestrictionNode = namedtuple('RestrictionNode', 'first second')
# A mapping of the simple schema types to base::Value::Types.
SIMPLE_SCHEMA_NAME_MAP = {
'boolean': 'Type::BOOLEAN',
'integer': 'Type::INTEGER',
'null': 'Type::NONE',
'number': 'Type::DOUBLE',
'string': 'Type::STRING',
}
INVALID_INDEX = -1
MIN_INDEX = -1
MAX_INDEX = (1 << 15) - 1 # signed short in c++
MIN_POLICY_ID = 0
MAX_POLICY_ID = (1 << 16) - 1 # unsigned short
MIN_EXTERNAL_DATA_SIZE = 0
MAX_EXTERNAL_DATA_SIZE = (1 << 32) - 1 # unsigned int32
class SchemaNodesGenerator:
"""Builds the internal structs to represent a JSON schema."""
def __init__(self, shared_strings):
"""Creates a new generator.
|shared_strings| is a map of strings to a C expression that evaluates to
that string at runtime. This mapping can be used to reuse existing string
constants."""
self.shared_strings = shared_strings
self.key_index_map = {} # |SchemaNodeKey| -> index in |schema_nodes|
self.schema_nodes = [] # List of |SchemaNode|s
self.property_nodes = [] # List of |PropertyNode|s
self.properties_nodes = [] # List of |PropertiesNode|s
self.restriction_nodes = [] # List of |RestrictionNode|s
self.required_properties = []
self.int_enums = []
self.string_enums = []
self.ranges = {}
self.id_map = {}
def GetString(self, s):
if s in self.shared_strings:
return self.shared_strings[s]
# Generate JSON escaped string, which is slightly different from desired
    # C/C++ escaped string. Known differences include the unicode escaping format.
return json.dumps(s)
def AppendSchema(self, schema_type, extra, is_sensitive_value, comment=''):
# Find existing schema node with same structure.
key_node = SchemaNodeKey(schema_type, extra, is_sensitive_value)
if key_node in self.key_index_map:
index = self.key_index_map[key_node]
if comment:
self.schema_nodes[index].comments.add(comment)
return index
# Create new schema node.
index = len(self.schema_nodes)
comments = {comment} if comment else set()
schema_node = SchemaNode(schema_type, extra, is_sensitive_value, False,
comments)
self.schema_nodes.append(schema_node)
self.key_index_map[key_node] = index
return index
def AppendRestriction(self, first, second):
r = RestrictionNode(str(first), str(second))
if not r in self.ranges:
self.ranges[r] = len(self.restriction_nodes)
self.restriction_nodes.append(r)
return self.ranges[r]
def GetSimpleType(self, name, is_sensitive_value):
return self.AppendSchema(SIMPLE_SCHEMA_NAME_MAP[name], INVALID_INDEX,
is_sensitive_value, 'simple type: ' + name)
def SchemaHaveRestriction(self, schema):
return any(keyword in schema
for keyword in ['minimum', 'maximum', 'enum', 'pattern'])
def IsConsecutiveInterval(self, seq):
sortedSeq = sorted(seq)
return all(
sortedSeq[i] + 1 == sortedSeq[i + 1] for i in range(len(sortedSeq) - 1))
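  # For example (illustrative): IsConsecutiveInterval([3, 1, 2]) is True,
  # while IsConsecutiveInterval([1, 2, 4]) is False.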
def GetEnumIntegerType(self, schema, is_sensitive_value, name):
assert all(type(x) == int for x in schema['enum'])
possible_values = schema['enum']
if self.IsConsecutiveInterval(possible_values):
index = self.AppendRestriction(max(possible_values), min(possible_values))
return self.AppendSchema(
'Type::INTEGER', index, is_sensitive_value,
'integer with enumeration restriction (use range instead): %s' % name)
offset_begin = len(self.int_enums)
self.int_enums += possible_values
offset_end = len(self.int_enums)
return self.AppendSchema('Type::INTEGER',
self.AppendRestriction(offset_begin, offset_end),
is_sensitive_value,
'integer with enumeration restriction: %s' % name)
def GetEnumStringType(self, schema, is_sensitive_value, name):
assert all(type(x) == str for x in schema['enum'])
offset_begin = len(self.string_enums)
self.string_enums += schema['enum']
offset_end = len(self.string_enums)
return self.AppendSchema('Type::STRING',
self.AppendRestriction(offset_begin, offset_end),
is_sensitive_value,
'string with enumeration restriction: %s' % name)
def GetEnumType(self, schema, is_sensitive_value, name):
if len(schema['enum']) == 0:
raise RuntimeError('Empty enumeration in %s' % name)
elif schema['type'] == 'integer':
return self.GetEnumIntegerType(schema, is_sensitive_value, name)
elif schema['type'] == 'string':
return self.GetEnumStringType(schema, is_sensitive_value, name)
else:
raise RuntimeError('Unknown enumeration type in %s' % name)
def GetPatternType(self, schema, is_sensitive_value, name):
if schema['type'] != 'string':
raise RuntimeError('Unknown pattern type in %s' % name)
pattern = schema['pattern']
# Try to compile the pattern to validate it, note that the syntax used
# here might be slightly different from re2.
# TODO(binjin): Add a python wrapper of re2 and use it here.
re.compile(pattern)
index = len(self.string_enums)
self.string_enums.append(pattern)
return self.AppendSchema('Type::STRING', self.AppendRestriction(
index, index), is_sensitive_value,
'string with pattern restriction: %s' % name)
def GetRangedType(self, schema, is_sensitive_value, name):
if schema['type'] != 'integer':
raise RuntimeError('Unknown ranged type in %s' % name)
min_value_set, max_value_set = False, False
if 'minimum' in schema:
min_value = int(schema['minimum'])
min_value_set = True
if 'maximum' in schema:
max_value = int(schema['maximum'])
max_value_set = True
if min_value_set and max_value_set and min_value > max_value:
raise RuntimeError('Invalid ranged type in %s' % name)
index = self.AppendRestriction(
str(max_value) if max_value_set else 'INT_MAX',
str(min_value) if min_value_set else 'INT_MIN')
return self.AppendSchema('Type::INTEGER', index, is_sensitive_value,
'integer with ranged restriction: %s' % name)
def Generate(self, schema, name):
"""Generates the structs for the given schema.
|schema|: a valid JSON schema in a dictionary.
|name|: the name of the current node, for the generated comments."""
if '$ref' in schema:
if 'id' in schema:
raise RuntimeError("Schemas with a $ref can't have an id")
if not isinstance(schema['$ref'], string_type):
raise RuntimeError("$ref attribute must be a string")
return schema['$ref']
is_sensitive_value = schema.get('sensitiveValue', False)
assert type(is_sensitive_value) is bool
if schema['type'] in SIMPLE_SCHEMA_NAME_MAP:
if not self.SchemaHaveRestriction(schema):
# Simple types use shared nodes.
return self.GetSimpleType(schema['type'], is_sensitive_value)
elif 'enum' in schema:
return self.GetEnumType(schema, is_sensitive_value, name)
elif 'pattern' in schema:
return self.GetPatternType(schema, is_sensitive_value, name)
else:
return self.GetRangedType(schema, is_sensitive_value, name)
if schema['type'] == 'array':
return self.AppendSchema(
'Type::LIST',
self.GenerateAndCollectID(schema['items'], 'items of ' + name),
is_sensitive_value)
elif schema['type'] == 'object':
# Reserve an index first, so that dictionaries come before their
# properties. This makes sure that the root node is the first in the
# SchemaNodes array.
      # This, however, prevents de-duplication for object schemas since we could
# only determine duplicates after all child schema nodes are generated as
# well and then we couldn't remove the newly created schema node without
# invalidating all child schema indices.
index = len(self.schema_nodes)
self.schema_nodes.append(
SchemaNode('Type::DICTIONARY', INVALID_INDEX, is_sensitive_value,
False, {name}))
if 'additionalProperties' in schema:
additionalProperties = self.GenerateAndCollectID(
schema['additionalProperties'], 'additionalProperties of ' + name)
else:
additionalProperties = INVALID_INDEX
# Properties must be sorted by name, for the binary search lookup.
# Note that |properties| must be evaluated immediately, so that all the
# recursive calls to Generate() append the necessary child nodes; if
# |properties| were a generator then this wouldn't work.
sorted_properties = sorted(schema.get('properties', {}).items())
properties = [
PropertyNode(
self.GetString(key), self.GenerateAndCollectID(subschema, key))
for key, subschema in sorted_properties
]
pattern_properties = []
for pattern, subschema in schema.get('patternProperties', {}).items():
pattern_properties.append(
PropertyNode(
self.GetString(pattern),
self.GenerateAndCollectID(subschema, pattern)))
begin = len(self.property_nodes)
self.property_nodes += properties
end = len(self.property_nodes)
self.property_nodes += pattern_properties
pattern_end = len(self.property_nodes)
if index == 0:
self.root_properties_begin = begin
self.root_properties_end = end
required_begin = len(self.required_properties)
required_properties = schema.get('required', [])
assert type(required_properties) is list
assert all(type(x) == str for x in required_properties)
self.required_properties += required_properties
required_end = len(self.required_properties)
# Check that each string in |required_properties| is in |properties|.
properties = schema.get('properties', {})
for name in required_properties:
assert name in properties
extra = len(self.properties_nodes)
self.properties_nodes.append(
PropertiesNode(begin, end, pattern_end, required_begin, required_end,
additionalProperties, name))
# Update index at |extra| now, since that was filled with a dummy value
# when the schema node was created.
self.schema_nodes[index] = self.schema_nodes[index]._replace(extra=extra)
return index
else:
assert False
def GenerateAndCollectID(self, schema, name):
"""A wrapper of Generate(), will take the return value, check and add 'id'
attribute to self.id_map. The wrapper needs to be used for every call to
Generate().
"""
index = self.Generate(schema, name)
if 'id' not in schema:
return index
id_str = schema['id']
if id_str in self.id_map:
raise RuntimeError('Duplicated id: ' + id_str)
self.id_map[id_str] = index
return index
def Write(self, f):
"""Writes the generated structs to the given file.
|f| an open file to write to."""
f.write('const internal::SchemaNode kSchemas[] = {\n'
'// Type' + ' ' * 27 +
'Extra IsSensitiveValue HasSensitiveChildren\n')
for schema_node in self.schema_nodes:
assert schema_node.extra >= MIN_INDEX and schema_node.extra <= MAX_INDEX
comment = ('\n' + ' ' * 69 + '// ').join(sorted(schema_node.comments))
f.write(' { base::Value::%-19s %4s %-16s %-5s }, // %s\n' %
(schema_node.schema_type + ',', str(schema_node.extra) + ',',
str(schema_node.is_sensitive_value).lower() + ',',
str(schema_node.has_sensitive_children).lower(), comment))
f.write('};\n\n')
if self.property_nodes:
f.write('const internal::PropertyNode kPropertyNodes[] = {\n'
'// Property' + ' ' * 61 + 'Schema\n')
for property_node in self.property_nodes:
f.write(' { %-64s %6d },\n' % (property_node.key + ',',
property_node.schema))
f.write('};\n\n')
if self.properties_nodes:
f.write('const internal::PropertiesNode kProperties[] = {\n'
'// Begin End PatternEnd RequiredBegin RequiredEnd'
' Additional Properties\n')
for properties_node in self.properties_nodes:
for i in range(0, len(properties_node) - 1):
assert (properties_node[i] >= MIN_INDEX and
properties_node[i] <= MAX_INDEX)
f.write(
' { %5d, %5d, %5d, %5d, %10d, %5d }, // %s\n' % properties_node)
f.write('};\n\n')
if self.restriction_nodes:
f.write('const internal::RestrictionNode kRestrictionNodes[] = {\n')
f.write('// FIRST, SECOND\n')
for restriction_node in self.restriction_nodes:
f.write(' {{ %-8s %4s}},\n' % (restriction_node.first + ',',
restriction_node.second))
f.write('};\n\n')
if self.required_properties:
f.write('const char* const kRequiredProperties[] = {\n')
for required_property in self.required_properties:
f.write(' %s,\n' % self.GetString(required_property))
f.write('};\n\n')
if self.int_enums:
f.write('const int kIntegerEnumerations[] = {\n')
for possible_values in self.int_enums:
f.write(' %d,\n' % possible_values)
f.write('};\n\n')
if self.string_enums:
f.write('const char* const kStringEnumerations[] = {\n')
for possible_values in self.string_enums:
f.write(' %s,\n' % self.GetString(possible_values))
f.write('};\n\n')
f.write('const internal::SchemaData* GetChromeSchemaData() {\n')
f.write(' static const internal::SchemaData kChromeSchemaData = {\n'
' kSchemas,\n')
f.write(' kPropertyNodes,\n' if self.property_nodes else ' nullptr,\n')
f.write(' kProperties,\n' if self.properties_nodes else ' nullptr,\n')
f.write(' kRestrictionNodes,\n' if self.
restriction_nodes else ' nullptr,\n')
f.write(' kRequiredProperties,\n' if self.
required_properties else ' nullptr,\n')
f.write(' kIntegerEnumerations,\n' if self.int_enums else ' nullptr,\n')
f.write(
' kStringEnumerations,\n' if self.string_enums else ' nullptr,\n')
f.write(' %d, // validation_schema root index\n' %
self.validation_schema_root_index)
f.write(' };\n\n')
f.write(' return &kChromeSchemaData;\n' '}\n\n')
def GetByID(self, id_str):
if not isinstance(id_str, string_type):
return id_str
if id_str not in self.id_map:
raise RuntimeError('Invalid $ref: ' + id_str)
return self.id_map[id_str]
def ResolveID(self, index, tuple_type, params):
simple_tuple = params[:index] + (self.GetByID(
params[index]),) + params[index + 1:]
return tuple_type(*simple_tuple)
def ResolveReferences(self):
"""Resolve reference mapping, required to be called after Generate()
After calling Generate(), the type of indices used in schema structures
might be either int or string. An int type suggests that it's a resolved
index, but for string type it's unresolved. Resolving a reference is as
simple as looking up for corresponding ID in self.id_map, and replace the
old index with the mapped index.
"""
self.schema_nodes = list(
map(partial(self.ResolveID, 1, SchemaNode), self.schema_nodes))
self.property_nodes = list(
map(partial(self.ResolveID, 1, PropertyNode), self.property_nodes))
self.properties_nodes = list(
map(partial(self.ResolveID, 3, PropertiesNode), self.properties_nodes))
def FindSensitiveChildren(self):
"""Wrapper function, which calls FindSensitiveChildrenRecursive().
"""
if self.schema_nodes:
self.FindSensitiveChildrenRecursive(0, set())
def FindSensitiveChildrenRecursive(self, index, handled_schema_nodes):
"""Recursively compute |has_sensitive_children| for the schema node at
|index| and all its child elements. A schema has sensitive children if any
of its children has |is_sensitive_value|==True or has sensitive children
itself.
"""
node = self.schema_nodes[index]
if index in handled_schema_nodes:
return node.has_sensitive_children or node.is_sensitive_value
handled_schema_nodes.add(index)
has_sensitive_children = False
if node.schema_type == 'Type::DICTIONARY':
properties_node = self.properties_nodes[node.extra]
# Iterate through properties and patternProperties.
for property_index in range(properties_node.begin,
properties_node.pattern_end - 1):
sub_index = self.property_nodes[property_index].schema
has_sensitive_children |= self.FindSensitiveChildrenRecursive(
sub_index, handled_schema_nodes)
# AdditionalProperties
if properties_node.additional != INVALID_INDEX:
sub_index = properties_node.additional
has_sensitive_children |= self.FindSensitiveChildrenRecursive(
sub_index, handled_schema_nodes)
elif node.schema_type == 'Type::LIST':
sub_index = node.extra
has_sensitive_children |= self.FindSensitiveChildrenRecursive(
sub_index, handled_schema_nodes)
if has_sensitive_children:
self.schema_nodes[index] = self.schema_nodes[index]._replace(
has_sensitive_children=True)
return has_sensitive_children or node.is_sensitive_value
def _GenerateDefaultValue(value):
"""Converts a JSON object into a base::Value entry. Returns a tuple, the first
entry being a list of declaration statements to define the variable, the
second entry being a way to access the variable.
If no definition is needed, the first return value will be an empty list. If
any error occurs, the second return value will be None (ie, no way to fetch
the value).
|value|: The deserialized value to convert to base::Value."""
if type(value) == bool or type(value) == int:
return [], 'base::Value(%s)' % json.dumps(value)
elif type(value) == str:
return [], 'base::Value("%s")' % value
elif type(value) == list:
setup = ['base::Value default_value(base::Value::Type::LIST);']
for entry in value:
decl, fetch = _GenerateDefaultValue(entry)
# Nested lists are not supported.
if decl:
return [], None
setup.append('default_value.Append(%s);' % fetch)
return setup, 'std::move(default_value)'
return [], None
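# Illustrative examples of the helper above (comments only, not executed):
#   _GenerateDefaultValue(True)  -> ([], 'base::Value(true)')
#   _GenerateDefaultValue(7)     -> ([], 'base::Value(7)')
#   _GenerateDefaultValue({})    -> ([], None)   # dictionaries are unsupported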
def _WritePolicyConstantSource(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write('''#include "components/policy/policy_constants.h"
#include <algorithm>
#include <climits>
#include <memory>
#include "base/check_op.h"
#include "base/stl_util.h" // base::size()
#include "base/values.h"
#include "build/branding_buildflags.h"
#include "components/policy/core/common/policy_types.h"
#include "components/policy/core/common/schema_internal.h"
#include "components/policy/proto/cloud_policy.pb.h"
#include "components/policy/risk_tag.h"
namespace em = enterprise_management;
namespace policy {
''')
# Generate the Chrome schema.
chrome_schema = {
'type': 'object',
'properties': {},
}
chrome_validation_schema = {
'type': 'object',
'properties': {},
}
shared_strings = {}
for policy in policies:
shared_strings[policy.name] = "key::k%s" % policy.name
if policy.is_supported:
chrome_schema['properties'][policy.name] = policy.schema
if policy.validation_schema is not None:
(chrome_validation_schema['properties'][policy.name]
) = policy.validation_schema
# Note: this list must be kept in sync with the known property list of the
# Chrome schema, so that binary searching in the PropertyNode array gets the
# right index on this array as well. See the implementation of
# GetChromePolicyDetails() below.
  # TODO(crbug.com/1074336): kChromePolicyDetails shouldn't be declared if there
# is no policy.
f.write(
'''const __attribute__((unused)) PolicyDetails kChromePolicyDetails[] = {
// is_deprecated is_future is_device_policy id max_external_data_size, risk tags
''')
for policy in policies:
if policy.is_supported:
assert policy.id >= MIN_POLICY_ID and policy.id <= MAX_POLICY_ID
assert (policy.max_size >= MIN_EXTERNAL_DATA_SIZE and
policy.max_size <= MAX_EXTERNAL_DATA_SIZE)
f.write(' // %s\n' % policy.name)
f.write(' { %-14s%-10s%-17s%4s,%22s, %s },\n' %
('true,' if policy.is_deprecated else 'false,',
'true,' if policy.is_future_on else 'false, ',
'true,' if policy.is_device_only else 'false,', policy.id,
policy.max_size, risk_tags.ToInitString(policy.tags)))
f.write('};\n\n')
schema_generator = SchemaNodesGenerator(shared_strings)
schema_generator.GenerateAndCollectID(chrome_schema, 'root node')
if chrome_validation_schema['properties']:
schema_generator.validation_schema_root_index = \
schema_generator.GenerateAndCollectID(chrome_validation_schema,
'validation_schema root node')
else:
schema_generator.validation_schema_root_index = INVALID_INDEX
schema_generator.ResolveReferences()
schema_generator.FindSensitiveChildren()
schema_generator.Write(f)
f.write('\n')
if schema_generator.property_nodes:
f.write('namespace {\n')
f.write('bool CompareKeys(const internal::PropertyNode& node,\n'
' const std::string& key) {\n'
' return node.key < key;\n'
'}\n\n')
f.write('} // namespace\n\n')
if target_platform == 'win':
f.write('#if BUILDFLAG(GOOGLE_CHROME_BRANDING)\n'
'const wchar_t kRegistryChromePolicyKey[] = '
'L"' + CHROME_POLICY_KEY + '";\n'
'#else\n'
'const wchar_t kRegistryChromePolicyKey[] = '
'L"' + CHROMIUM_POLICY_KEY + '";\n'
'#endif\n\n')
# Setting enterprise defaults code generation.
profile_policy_enterprise_defaults = ""
system_wide_policy_enterprise_defaults = ""
for policy in policies:
if policy.has_enterprise_default and policy.is_supported:
declare_default_stmts, fetch_default = _GenerateDefaultValue(
policy.enterprise_default)
if not fetch_default:
raise RuntimeError('Type %s of policy %s is not supported at '
'enterprise defaults' %
(policy.policy_type, policy.name))
# Convert declare_default_stmts to a string with the correct indentation.
if declare_default_stmts:
declare_default = ' %s\n' % '\n '.join(declare_default_stmts)
else:
declare_default = ''
setting_enterprise_default = ''' if (!policy_map->Get(key::k%s)) {
%s
policy_map->Set(key::k%s,
POLICY_LEVEL_MANDATORY,
POLICY_SCOPE_USER,
POLICY_SOURCE_ENTERPRISE_DEFAULT,
%s,
nullptr);
}
''' % (policy.name, declare_default, policy.name, fetch_default)
if policy.per_profile:
profile_policy_enterprise_defaults += setting_enterprise_default
else:
system_wide_policy_enterprise_defaults += setting_enterprise_default
f.write('#if defined(OS_CHROMEOS)')
f.write('''
void SetEnterpriseUsersProfileDefaults(PolicyMap* policy_map) {
%s
}
''' % profile_policy_enterprise_defaults)
f.write('''
void SetEnterpriseUsersSystemWideDefaults(PolicyMap* policy_map) {
%s
}
''' % system_wide_policy_enterprise_defaults)
f.write('''
void SetEnterpriseUsersDefaults(PolicyMap* policy_map) {
SetEnterpriseUsersProfileDefaults(policy_map);
SetEnterpriseUsersSystemWideDefaults(policy_map);
}
''')
f.write('#endif\n\n')
f.write('const PolicyDetails* GetChromePolicyDetails('
'const std::string& policy) {\n')
if schema_generator.property_nodes:
f.write(' // First index in kPropertyNodes of the Chrome policies.\n'
' static const int begin_index = %s;\n'
' // One-past-the-end of the Chrome policies in kPropertyNodes.\n'
' static const int end_index = %s;\n' %
(schema_generator.root_properties_begin,
schema_generator.root_properties_end))
f.write(''' const internal::PropertyNode* begin =
kPropertyNodes + begin_index;
const internal::PropertyNode* end = kPropertyNodes + end_index;
const internal::PropertyNode* it =
std::lower_bound(begin, end, policy, CompareKeys);
if (it == end || it->key != policy)
return nullptr;
// This relies on kPropertyNodes from begin_index to end_index
// having exactly the same policies (and in the same order) as
// kChromePolicyDetails, so that binary searching on the first
// gets the same results as a binary search on the second would.
// However, kPropertyNodes has the policy names and
// kChromePolicyDetails doesn't, so we obtain the index into
// the second array by searching the first to avoid duplicating
// the policy name pointers.
// Offsetting |it| from |begin| here obtains the index we're
// looking for.
size_t index = it - begin;
CHECK_LT(index, base::size(kChromePolicyDetails));
return kChromePolicyDetails + index;
''')
else:
f.write('return nullptr;')
f.write('}\n\n')
f.write('namespace key {\n\n')
for policy in policies:
# TODO(joaodasilva): Include only supported policies in
# configuration_policy_handler.cc and configuration_policy_handler_list.cc
# so that these names can be conditional on 'policy.is_supported'.
# http://crbug.com/223616
f.write('const char k{name}[] = "{name}";\n'.format(name=policy.name))
f.write('\n} // namespace key\n\n')
f.write('namespace group {\n\n')
for group in policy_atomic_groups:
f.write('const char k{name}[] = "{name}";\n'.format(name=group.name))
f.write('\n')
f.write('namespace {\n\n')
for group in policy_atomic_groups:
f.write('const char* const %s[] = {' % (group.name))
for policy in group.policies:
f.write('key::k%s, ' % (policy))
f.write('nullptr};\n')
f.write('\n} // namespace\n')
f.write('\n} // namespace group\n\n')
atomic_groups_length = 0
f.write('const AtomicGroup kPolicyAtomicGroupMappings[] = {\n')
for group in policy_atomic_groups:
atomic_groups_length += 1
f.write(' {')
f.write(' {id}, group::k{name}, group::{name}'.format(
id=group.id, name=group.name))
f.write(' },\n')
f.write('};\n\n')
f.write('const size_t kPolicyAtomicGroupMappingsLength = %s;\n\n' %
(atomic_groups_length))
supported_user_policies = [
p for p in policies if p.is_supported and not p.is_device_only
]
protobuf_types = _GetProtobufTypes(supported_user_policies)
for protobuf_type in protobuf_types:
_WriteChromePolicyAccessSource(supported_user_policies, f, protobuf_type)
f.write('\n} // namespace policy\n')
# Return the StringPolicyType enum value for a particular policy type.
def _GetStringPolicyType(policy_type):
if policy_type == 'Type::STRING':
return 'StringPolicyType::STRING'
elif policy_type == 'Type::DICTIONARY':
return 'StringPolicyType::JSON'
elif policy_type == 'TYPE_EXTERNAL':
return 'StringPolicyType::EXTERNAL'
raise RuntimeError('Invalid string type: ' + policy_type + '!\n')
# Writes an array that contains the pointers to the proto field for each policy
# in |policies| of the given |protobuf_type|.
def _WriteChromePolicyAccessSource(policies, f, protobuf_type):
f.write('const %sPolicyAccess k%sPolicyAccess[] = {\n' % (protobuf_type,
protobuf_type))
extra_args = ''
for policy in policies:
if policy.policy_protobuf_type == protobuf_type:
name = policy.name
if protobuf_type == 'String':
extra_args = ',\n ' + _GetStringPolicyType(policy.policy_type)
f.write(' {key::k%s,\n'
' %s,\n'
' &em::CloudPolicySettings::has_%s,\n'
' &em::CloudPolicySettings::%s%s},\n' %
(name, str(policy.per_profile).lower(), name.lower(),
name.lower(), extra_args))
# The list is nullptr-terminated.
f.write(' {nullptr, false, nullptr, nullptr},\n' '};\n\n')
#------------------ policy risk tag header -------------------------#
class RiskTags(object):
'''Generates files and strings to translate the parsed risk tags.'''
# TODO(fhorschig|tnagel): Add, Check & Generate translation descriptions.
def __init__(self, template_file_contents):
self.max_tags = None
self.enum_for_tag = OrderedDict() # Ordered by severity as stated in JSON.
self._ReadRiskTagMetaData(template_file_contents)
def GenerateEnum(self):
values = [' ' + self.enum_for_tag[tag] for tag in self.enum_for_tag]
values.append(' RISK_TAG_COUNT')
values.append(' RISK_TAG_NONE')
enum_text = 'enum RiskTag : uint8_t {\n'
enum_text += ',\n'.join(values) + '\n};\n'
return enum_text
def GetMaxTags(self):
return str(self.max_tags)
def GetValidTags(self):
return [tag for tag in self.enum_for_tag]
def ToInitString(self, tags):
all_tags = [self._ToEnum(tag) for tag in tags]
all_tags += ["RISK_TAG_NONE" for missing in range(len(tags), self.max_tags)]
str_tags = "{ " + ", ".join(all_tags) + " }"
return "\n ".join(textwrap.wrap(str_tags, 69))
def ComputeMaxTags(self, policies):
self.max_tags = 0
for policy in policies:
if not policy.is_supported or policy.tags == None:
continue
self.max_tags = max(len(policy.tags), self.max_tags)
def _ToEnum(self, tag):
if tag in self.enum_for_tag:
return self.enum_for_tag[tag]
raise RuntimeError('Invalid Tag:' + tag + '!\n'
                       'Choose a valid tag from \'risk_tag_definitions\' (a '
'subproperty of root in policy_templates.json)!')
def _ReadRiskTagMetaData(self, template_file_contents):
for tag in template_file_contents['risk_tag_definitions']:
if tag.get('name', None) == None:
        raise RuntimeError('Tag in \'risk_tag_definitions\' without '
                           'a name found!')
if tag.get('description', None) == None:
raise RuntimeError('Tag ' + tag['name'] + ' has no description!')
if tag.get('user-description', None) == None:
raise RuntimeError('Tag ' + tag['name'] + ' has no user-description!')
self.enum_for_tag[tag['name']] = "RISK_TAG_" + tag['name'].replace(
"-", "_").upper()
def _WritePolicyRiskTagHeader(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write('''#ifndef CHROME_COMMON_POLICY_RISK_TAG_H_
#define CHROME_COMMON_POLICY_RISK_TAG_H_
#include <stddef.h>
namespace policy {
// The tag of a policy indicates which impact a policy can have on
// a user's privacy and/or security. Ordered descending by
// impact.
// The explanation of the single tags is stated in
// policy_templates.json within the 'risk_tag_definitions' tag.
''')
f.write(risk_tags.GenerateEnum() + '\n')
f.write('// This constant describes how many risk tags were used by the\n'
'// policy which uses the most risk tags.\n'
'const size_t kMaxRiskTagCount = ' + risk_tags.GetMaxTags() + ';\n'
'\n'
'} // namespace policy\n\n'
'\n'
'#endif // CHROME_COMMON_POLICY_RISK_TAG_H_')
#------------------ policy protobufs -------------------------------#
# This code applies to both Active Directory and Google cloud management.
CHROME_SETTINGS_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
// For StringList and PolicyOptions.
import "policy_common_definitions.proto";
'''
CLOUD_POLICY_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
import "policy_common_definitions.proto";
'''
# Field IDs [1..RESERVED_IDS] will not be used in the wrapping protobuf.
RESERVED_IDS = 2
def _WritePolicyProto(f, policy, fields):
_OutputComment(f, policy.caption + '\n\n' + policy.desc)
if policy.items is not None:
_OutputComment(f, '\nValid values:')
for item in policy.items:
_OutputComment(f, ' %s: %s' % (str(item.value), item.caption))
if policy.policy_type == 'Type::DICTIONARY':
_OutputComment(
f, '\nValue schema:\n%s' % json.dumps(
policy.schema, sort_keys=True, indent=4, separators=(',', ': ')))
_OutputComment(
f, '\nSupported on: %s' %
', '.join(sorted(list(policy.platforms.union(policy.future_on)))))
if policy.can_be_recommended and not policy.can_be_mandatory:
_OutputComment(
f, '\nNote: this policy must have a RECOMMENDED ' +
'PolicyMode set in PolicyOptions.')
f.write('message %sProto {\n' % policy.name)
f.write(' optional PolicyOptions policy_options = 1;\n')
f.write(' optional %s %s = 2;\n' % (policy.protobuf_type, policy.name))
f.write('}\n\n')
fields += [
' optional %sProto %s = %s;\n' % (policy.name, policy.name,
policy.id + RESERVED_IDS)
]
def _WriteChromeSettingsProtobuf(policies, policy_atomic_groups,
target_platform, f, risk_tags):
f.write(CHROME_SETTINGS_PROTO_HEAD)
fields = []
f.write('// PBs for individual settings.\n\n')
for policy in policies:
# Note: This protobuf also gets the unsupported policies, since it's an
# exhaustive list of all the supported user policies on any platform.
if not policy.is_device_only:
_WritePolicyProto(f, policy, fields)
f.write('// --------------------------------------------------\n'
'// Big wrapper PB containing the above groups.\n\n'
'message ChromeSettingsProto {\n')
f.write(''.join(fields))
f.write('}\n\n')
def _WriteChromeSettingsFullRuntimeProtobuf(policies, policy_atomic_groups,
target_platform, f, risk_tags):
# For full runtime, disable LITE_RUNTIME switch and import full runtime
# version of cloud_policy.proto.
f.write(
CHROME_SETTINGS_PROTO_HEAD.replace(
"option optimize_for = LITE_RUNTIME;",
"//option optimize_for = LITE_RUNTIME;").replace(
"import \"cloud_policy.proto\";",
"import \"cloud_policy_full_runtime.proto\";").replace(
"import \"policy_common_definitions.proto\";",
"import \"policy_common_definitions_full_runtime.proto\";"))
fields = []
f.write('// PBs for individual settings.\n\n')
for policy in policies:
# Note: This protobuf also gets the unsupported policies, since it's an
# exhaustive list of all the supported user policies on any platform.
if not policy.is_device_only:
_WritePolicyProto(f, policy, fields)
f.write('// --------------------------------------------------\n'
'// Big wrapper PB containing the above groups.\n\n'
'message ChromeSettingsProto {\n')
f.write(''.join(fields))
f.write('}\n\n')
def _WriteCloudPolicyProtobuf(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write(CLOUD_POLICY_PROTO_HEAD)
f.write('message CloudPolicySettings {\n')
for policy in policies:
if policy.is_supported and not policy.is_device_only:
f.write(
' optional %sPolicyProto %s = %s;\n' %
(policy.policy_protobuf_type, policy.name, policy.id + RESERVED_IDS))
f.write('}\n\n')
def _WriteCloudPolicyFullRuntimeProtobuf(policies, policy_atomic_groups,
target_platform, f, risk_tags):
# For full runtime, disable LITE_RUNTIME switch
f.write(
CLOUD_POLICY_PROTO_HEAD.replace(
"option optimize_for = LITE_RUNTIME;",
"//option optimize_for = LITE_RUNTIME;").replace(
"import \"policy_common_definitions.proto\";",
"import \"policy_common_definitions_full_runtime.proto\";"))
f.write('message CloudPolicySettings {\n')
for policy in policies:
if policy.is_supported and not policy.is_device_only:
f.write(
' optional %sPolicyProto %s = %s;\n' %
(policy.policy_protobuf_type, policy.name, policy.id + RESERVED_IDS))
f.write('}\n\n')
def _WritePolicyCommonDefinitionsFullRuntimeProtobuf(
policy_common_definitions_proto_path, policies, policy_atomic_groups,
target_platform, f, risk_tags):
# For full runtime, disable LITE_RUNTIME switch
with open(policy_common_definitions_proto_path, 'r') as proto_file:
policy_common_definitions_proto_code = proto_file.read()
f.write(
policy_common_definitions_proto_code.replace(
"option optimize_for = LITE_RUNTIME;",
"//option optimize_for = LITE_RUNTIME;"))
#------------------ Chrome OS policy constants header --------------#
# This code applies to Active Directory management only.
# Filter for _GetSupportedChromeOSPolicies().
def _IsSupportedChromeOSPolicy(type, policy):
# Filter out unsupported policies.
if not policy.is_supported:
return False
# Filter out device policies if user policies are requested.
if type == 'user' and policy.is_device_only:
return False
# Filter out user policies if device policies are requested.
if type == 'device' and not policy.is_device_only:
return False
# Filter out non-Active-Directory policies.
if 'active_directory' not in policy.supported_chrome_os_management:
return False
return True
# Returns a list of supported user and/or device policies by filtering
# |policies|. |type| may be 'user', 'device' or 'both'.
def _GetSupportedChromeOSPolicies(policies, type):
if (type not in ['user', 'device', 'both']):
raise RuntimeError('Unsupported type "%s"' % type)
return filter(partial(_IsSupportedChromeOSPolicy, type), policies)
# Returns the list of all policy.policy_protobuf_type strings from |policies|.
def _GetProtobufTypes(policies):
return sorted(['Integer', 'Boolean', 'String', 'StringList'])
# Writes the definition of an array that contains the pointers to the mutable
# proto field for each policy in |policies| of the given |protobuf_type|.
def _WriteChromeOSPolicyAccessHeader(f, protobuf_type):
f.write('// Access to the mutable protobuf function of all supported '
'%s user\n// policies.\n' % protobuf_type.lower())
f.write('struct %sPolicyAccess {\n'
' const char* policy_key;\n'
' bool per_profile;\n'
' enterprise_management::%sPolicyProto*\n'
' (enterprise_management::CloudPolicySettings::'
'*mutable_proto_ptr)();\n'
'};\n' % (protobuf_type, protobuf_type))
f.write('extern const %sPolicyAccess k%sPolicyAccess[];\n\n' %
(protobuf_type, protobuf_type))
# Writes policy_constants.h for use in Chrome OS.
def _WriteChromeOSPolicyConstantsHeader(policies, policy_atomic_groups,
target_platform, f, risk_tags):
f.write('#ifndef __BINDINGS_POLICY_CONSTANTS_H_\n'
'#define __BINDINGS_POLICY_CONSTANTS_H_\n\n')
# Forward declarations.
supported_user_policies = _GetSupportedChromeOSPolicies(policies, 'user')
protobuf_types = _GetProtobufTypes(supported_user_policies)
f.write('namespace enterprise_management {\n' 'class CloudPolicySettings;\n')
for protobuf_type in protobuf_types:
f.write('class %sPolicyProto;\n' % protobuf_type)
f.write('} // namespace enterprise_management\n\n')
f.write('namespace policy {\n\n')
# Policy keys.
all_supported_policies = _GetSupportedChromeOSPolicies(policies, 'both')
f.write('// Registry key names for user and device policies.\n'
'namespace key {\n\n')
for policy in all_supported_policies:
f.write('extern const char k' + policy.name + '[];\n')
f.write('\n} // namespace key\n\n')
# Device policy keys.
f.write('// NULL-terminated list of device policy registry key names.\n')
f.write('extern const char* kDevicePolicyKeys[];\n\n')
# User policy proto pointers, one struct for each protobuf type.
for protobuf_type in protobuf_types:
_WriteChromeOSPolicyAccessHeader(f, protobuf_type)
f.write('} // namespace policy\n\n'
'#endif // __BINDINGS_POLICY_CONSTANTS_H_\n')
#------------------ Chrome OS policy constants source --------------#
# Writes an array that contains the pointers to the mutable proto field for each
# policy in |policies| of the given |protobuf_type|.
def _WriteChromeOSPolicyAccessSource(policies, f, protobuf_type):
f.write('constexpr %sPolicyAccess k%sPolicyAccess[] = {\n' % (protobuf_type,
protobuf_type))
for policy in policies:
if policy.policy_protobuf_type == protobuf_type:
f.write(
' {key::k%s,\n'
' %s,\n'
' &em::CloudPolicySettings::mutable_%s},\n' %
(policy.name, str(policy.per_profile).lower(), policy.name.lower()))
# The list is nullptr-terminated.
f.write(' {nullptr, false, nullptr},\n' '};\n\n')
# Writes policy_constants.cc for use in Chrome OS.
def _WriteChromeOSPolicyConstantsSource(policies, policy_atomic_groups,
target_platform, f, risk_tags):
f.write('#include "bindings/cloud_policy.pb.h"\n'
'#include "bindings/policy_constants.h"\n\n'
'namespace em = enterprise_management;\n\n'
'namespace policy {\n\n')
# Policy keys.
all_supported_policies = _GetSupportedChromeOSPolicies(policies, 'both')
f.write('namespace key {\n\n')
for policy in all_supported_policies:
f.write('const char k{name}[] = "{name}";\n'.format(name=policy.name))
f.write('\n} // namespace key\n\n')
# Device policy keys.
supported_device_policies = _GetSupportedChromeOSPolicies(policies, 'device')
f.write('const char* kDevicePolicyKeys[] = {\n\n')
for policy in supported_device_policies:
f.write(' key::k%s,\n' % policy.name)
f.write(' nullptr};\n\n')
# User policy proto pointers, one struct for each protobuf type.
supported_user_policies = _GetSupportedChromeOSPolicies(policies, 'user')
protobuf_types = _GetProtobufTypes(supported_user_policies)
for protobuf_type in protobuf_types:
_WriteChromeOSPolicyAccessSource(supported_user_policies, f, protobuf_type)
f.write('} // namespace policy\n')
#------------------ app restrictions -------------------------------#
def _WriteAppRestrictions(policies, policy_atomic_groups, target_platform, f,
risk_tags):
def WriteRestrictionCommon(key):
f.write(' <restriction\n' ' android:key="%s"\n' % key)
f.write(' android:title="@string/%sTitle"\n' % key)
f.write(' android:description="@string/%sDesc"\n' % key)
def WriteItemsDefinition(key):
f.write(' android:entries="@array/%sEntries"\n' % key)
f.write(' android:entryValues="@array/%sValues"\n' % key)
def WriteAppRestriction(policy):
policy_name = policy.name
WriteRestrictionCommon(policy_name)
if policy.items is not None:
WriteItemsDefinition(policy_name)
f.write(' android:restrictionType="%s"/>' % policy.restriction_type)
f.write('\n\n')
# _WriteAppRestrictions body
f.write('<restrictions xmlns:android="'
'http://schemas.android.com/apk/res/android">\n\n')
for policy in policies:
if (policy.is_supported and policy.restriction_type != 'invalid'
and not policy.is_deprecated and not policy.is_future
and not policy.internal_only):
WriteAppRestriction(policy)
f.write('</restrictions>')
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 6,799,088,792,365,111,000 | 37.666278 | 80 | 0.645022 | false |
aferr/TimingCompartments | configs/topologies/BaseTopology.py | 15 | 2949 | # Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Power
import m5
class BaseTopology(object):
description = "BaseTopology"
def __init__(self):
""" When overriding place any objects created in
configs/ruby/<protocol>.py that are needed in
makeTopology (below) here. The minimum is usually
all of the controllers created in the above file.
"""
def makeTopology(self, options, IntLink, ExtLink, Router):
""" Called from configs/ruby/Ruby.py
The return value is ( list(Router), list(IntLink), list(ExtLink))
The API of this function cannot change when subclassing!!
Any additional information needed to create this topology should
be passed into the constructor when it's instantiated in
configs/ruby/<protocol>.py
"""
m5.util.fatal("BaseTopology should have been overridden!!")
class SimpleTopology(BaseTopology):
""" Provides methods needed for the topologies included in Ruby before
topology changes.
These topologies are "simple" in the sense that they only use a flat
list of controllers to construct the topology.
"""
description = "SimpleTopology"
def __init__(self, controllers):
self.nodes = controllers
def addController(self, controller):
self.nodes.append(controller)
def __len__(self):
return len(self.nodes)
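# Illustrative sketch (not part of gem5): a concrete subclass would typically
# look like the following, assuming Router and ExtLink accept these keyword
# arguments in the Ruby network model being used.
#
#   class SingleRouter(SimpleTopology):
#       description = "SingleRouter"
#
#       def makeTopology(self, options, IntLink, ExtLink, Router):
#           router = Router(router_id=0)
#           ext_links = [ExtLink(link_id=i, ext_node=n, int_node=router)
#                        for i, n in enumerate(self.nodes)]
#           return ([router], [], ext_links)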
| bsd-3-clause | 1,094,965,278,747,200,800 | 43.681818 | 77 | 0.726348 | false |
helloworldajou/webserver | demos/classifier_webcam.py | 4 | 7059 | #!/usr/bin/env python2
#
# Example to run classifier on webcam stream.
# Brandon Amos & Vijayenthiran
# 2016/06/21
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contrib: Vijayenthiran
# This example file shows how to run a classifier on a webcam stream. You need
# to run classifier.py first to generate a classifier with your own dataset.
# To run this file from the openface home dir:
# ./demo/classifier_webcam.py <path-to-your-classifier>
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import sys
import numpy as np
np.set_printoptions(precision=2)
from sklearn.mixture import GMM
import openface
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(bgrImg):
start = time.time()
if bgrImg is None:
raise Exception("Unable to load image/frame")
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
# Get the largest face bounding box
# bb = align.getLargestFaceBoundingBox(rgbImg) #Bounding box
# Get all bounding boxes
bb = align.getAllFaceBoundingBoxes(rgbImg)
if bb is None:
# raise Exception("Unable to find a face: {}".format(imgPath))
return None
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFaces = []
for box in bb:
alignedFaces.append(
align.align(
args.imgDim,
rgbImg,
box,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
if alignedFaces is None:
raise Exception("Unable to align the frame")
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
reps = []
for alignedFace in alignedFaces:
reps.append(net.forward(alignedFace))
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
# print (reps)
return reps
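# Illustrative usage of getRep() (assumes OpenCV can read the given image file):
#   reps = getRep(cv2.imread('someone.jpg'))
#   # 'reps' is a list with one 128-dimensional embedding per detected face.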
def infer(img, args):
with open(args.classifierModel, 'r') as f:
if sys.version_info[0] < 3:
            (le, clf) = pickle.load(f)  # le - label and clf - classifier
else:
            (le, clf) = pickle.load(f, encoding='latin1')  # le - label and clf - classifier
reps = getRep(img)
persons = []
confidences = []
for rep in reps:
try:
rep = rep.reshape(1, -1)
except:
print ("No Face detected")
return (None, None)
start = time.time()
predictions = clf.predict_proba(rep).ravel()
# print (predictions)
maxI = np.argmax(predictions)
# max2 = np.argsort(predictions)[-3:][::-1][1]
persons.append(le.inverse_transform(maxI))
# print (str(le.inverse_transform(max2)) + ": "+str( predictions [max2]))
# ^ prints the second prediction
confidences.append(predictions[maxI])
if args.verbose:
print("Prediction took {} seconds.".format(time.time() - start))
pass
# print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
pass
return (persons, confidences)
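# Illustrative usage of infer() (assumes 'args' comes from the parser below and
# 'args.classifierModel' points at a pickled (LabelEncoder, classifier) pair):
#   persons, confidences = infer(frame, args)
#   # e.g. (['alice'], [0.87]) for a single recognised face.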
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument(
'--captureDevice',
type=int,
default=0,
        help='Capture device. 0 for laptop webcam and 1 for USB webcam')
parser.add_argument('--width', type=int, default=320)
parser.add_argument('--height', type=int, default=240)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(
args.networkModel,
imgDim=args.imgDim,
cuda=args.cuda)
# Capture device. Usually 0 will be webcam and 1 will be usb cam.
video_capture = cv2.VideoCapture(args.captureDevice)
video_capture.set(3, args.width)
video_capture.set(4, args.height)
confidenceList = []
while True:
ret, frame = video_capture.read()
persons, confidences = infer(frame, args)
print ("P: " + str(persons) + " C: " + str(confidences))
try:
# append with two floating point precision
confidenceList.append('%.2f' % confidences[0])
except:
# If there is no face detected, confidences matrix will be empty.
# We can simply ignore it.
pass
for i, c in enumerate(confidences):
if c <= args.threshold: # 0.5 is kept as threshold for known face.
persons[i] = "_unknown"
# Print the person name and conf value on the frame
cv2.putText(frame, "P: {} C: {}".format(persons, confidences),
(50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.imshow('', frame)
# quit the program on the press of key 'q'
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| apache-2.0 | 5,748,460,909,555,260,000 | 31.985981 | 137 | 0.618785 | false |
ibinti/intellij-community | python/helpers/pycharm/__jb.for_twisted/twisted/plugins/teamcity_plugin.py | 11 | 1180 | import sys
from teamcity.unittestpy import TeamcityTestResult
from twisted.trial.reporter import Reporter
from twisted.python.failure import Failure
from twisted.plugins.twisted_trial import _Reporter
class FailureWrapper(Failure):
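    # Forward item access to the wrapped exception value so the failure can be
    # indexed like the underlying error.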
def __getitem__(self, key):
return self.value[key]
class TeamcityReporter(TeamcityTestResult, Reporter):
def __init__(self,
stream=sys.stdout,
tbformat='default',
realtime=False,
publisher=None):
TeamcityTestResult.__init__(self)
Reporter.__init__(self,
stream=stream,
tbformat=tbformat,
realtime=realtime,
publisher=publisher)
def addError(self, test, failure, *k):
super(TeamcityReporter, self).addError(test, FailureWrapper(failure), *k)
Teamcity = _Reporter("Teamcity Reporter",
"twisted.plugins.teamcity_plugin",
description="teamcity messages",
longOpt="teamcity",
shortOpt="teamcity",
klass="TeamcityReporter")
| apache-2.0 | 2,395,397,164,970,123,000 | 30.891892 | 81 | 0.574576 | false |
Lujeni/ansible | lib/ansible/modules/network/nxos/nxos_vrf.py | 5 | 17884 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages global VRF configuration.
description:
- This module provides declarative management of VRFs
on CISCO NXOS network devices.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Cisco NX-OS creates the default VRF by itself. Therefore,
you're not allowed to use default as I(vrf) name in this module.
  - C(vrf) name must not be longer than 32 characters.
  - VRF names are not case sensitive in NX-OS. However, the name is stored
    exactly as the user enters it and will not be changed again
    unless the VRF is removed and re-created. i.e. C(vrf=NTC) will create
    a VRF named NTC, but running it again with C(vrf=ntc) will not cause
    a configuration change.
options:
name:
description:
- Name of VRF to be managed.
required: true
aliases: [vrf]
admin_state:
description:
- Administrative state of the VRF.
default: up
choices: ['up','down']
vni:
description:
- Specify virtual network identifier. Valid values are Integer
or keyword 'default'.
version_added: "2.2"
rd:
description:
- VPN Route Distinguisher (RD). Valid values are a string in
one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or
IPV4:NN); the keyword 'auto', or the keyword 'default'.
version_added: "2.2"
interfaces:
description:
- List of interfaces to check the VRF has been
configured correctly or keyword 'default'.
version_added: 2.5
associated_interfaces:
description:
      - This is an intent option and checks the operational state of the given VRF C(name)
        for associated interfaces. If the value in C(associated_interfaces) does not match
        the operational state of the VRF's interfaces on the device, the module fails.
version_added: "2.5"
aggregate:
description: List of VRFs definitions.
version_added: 2.5
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
type: bool
default: 'no'
version_added: 2.5
state:
description:
- Manages desired state of the resource.
default: present
choices: ['present','absent']
description:
description:
- Description of the VRF or keyword 'default'.
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
'''
EXAMPLES = '''
- name: Ensure ntc VRF exists on switch
nxos_vrf:
name: ntc
description: testing
state: present
- name: Aggregate definition of VRFs
nxos_vrf:
aggregate:
- { name: test1, description: Testing, admin_state: down }
- { name: test2, interfaces: Ethernet1/2 }
- name: Aggregate definitions of VRFs with Purge
nxos_vrf:
aggregate:
- { name: ntc1, description: purge test1 }
- { name: ntc2, description: purge test2 }
state: present
purge: yes
- name: Delete VRFs exist on switch
nxos_vrf:
aggregate:
- { name: ntc1 }
- { name: ntc2 }
state: absent
- name: Assign interfaces to VRF declaratively
nxos_vrf:
name: test1
interfaces:
- Ethernet2/3
- Ethernet2/5
- name: Check interfaces assigned to VRF
nxos_vrf:
name: test1
associated_interfaces:
- Ethernet2/3
- Ethernet2/5
- name: Ensure VRF is tagged with interface Ethernet2/5 only (Removes from Ethernet2/3)
nxos_vrf:
name: test1
interfaces:
- Ethernet2/5
- name: Delete VRF
nxos_vrf:
name: ntc
state: absent
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample:
- vrf context ntc
- no shutdown
- interface Ethernet1/2
- no switchport
- vrf member test2
'''
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, get_interface_type
from ansible.module_utils.network.common.utils import remove_default_spec
def search_obj_in_list(name, lst):
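    # Return the first entry in lst whose 'name' matches, or None if absent.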
for o in lst:
if o['name'] == name:
return o
def execute_show_command(command, module):
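    # Run a single show command on the device, requesting structured JSON
    # output except for 'show run', which is only available as text.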
if 'show run' not in command:
output = 'json'
else:
output = 'text'
cmds = [{
'command': command,
'output': output,
}]
body = run_commands(module, cmds)
return body
def get_existing_vrfs(module):
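    # Return a list of {'name': <vrf_name>} dicts for every VRF currently
    # configured on the device, based on 'show vrf all'.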
objs = list()
command = "show vrf all"
try:
body = execute_show_command(command, module)[0]
except IndexError:
return list()
try:
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (TypeError, IndexError, KeyError):
return list()
if isinstance(vrf_table, list):
for vrf in vrf_table:
obj = {}
obj['name'] = vrf['vrf_name']
objs.append(obj)
elif isinstance(vrf_table, dict):
obj = {}
obj['name'] = vrf_table['vrf_name']
objs.append(obj)
return objs
def map_obj_to_commands(updates, module):
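    # Diff the desired state (want) against the device state (have) and build
    # the ordered list of configuration commands needed to converge them,
    # including removal of unmanaged VRFs when purge is requested.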
commands = list()
want, have = updates
state = module.params['state']
purge = module.params['purge']
args = ('rd', 'description', 'vni')
for w in want:
name = w['name']
admin_state = w['admin_state']
vni = w['vni']
interfaces = w.get('interfaces') or []
state = w['state']
del w['state']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent' and obj_in_have:
commands.append('no vrf context {0}'.format(name))
elif state == 'present':
if not obj_in_have:
commands.append('vrf context {0}'.format(name))
for item in args:
candidate = w.get(item)
if candidate and candidate != 'default':
cmd = item + ' ' + str(candidate)
commands.append(cmd)
if admin_state == 'up':
commands.append('no shutdown')
elif admin_state == 'down':
commands.append('shutdown')
commands.append('exit')
if interfaces and interfaces[0] != 'default':
for i in interfaces:
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('vrf member {0}'.format(name))
else:
# If vni is already configured on vrf, unconfigure it first.
if vni:
if obj_in_have.get('vni') and vni != obj_in_have.get('vni'):
commands.append('no vni {0}'.format(obj_in_have.get('vni')))
for item in args:
candidate = w.get(item)
if candidate == 'default':
if obj_in_have.get(item):
cmd = 'no ' + item + ' ' + obj_in_have.get(item)
commands.append(cmd)
elif candidate and candidate != obj_in_have.get(item):
cmd = item + ' ' + str(candidate)
commands.append(cmd)
if admin_state and admin_state != obj_in_have.get('admin_state'):
if admin_state == 'up':
commands.append('no shutdown')
elif admin_state == 'down':
commands.append('shutdown')
if commands:
commands.insert(0, 'vrf context {0}'.format(name))
commands.append('exit')
if interfaces and interfaces[0] != 'default':
if not obj_in_have['interfaces']:
for i in interfaces:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('vrf member {0}'.format(name))
elif set(interfaces) != set(obj_in_have['interfaces']):
missing_interfaces = list(set(interfaces) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('vrf member {0}'.format(name))
superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(interfaces))
for i in superfluous_interfaces:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('no vrf member {0}'.format(name))
elif interfaces and interfaces[0] == 'default':
if obj_in_have['interfaces']:
for i in obj_in_have['interfaces']:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('no vrf member {0}'.format(name))
if purge:
existing = get_existing_vrfs(module)
if existing:
for h in existing:
if h['name'] in ('default', 'management'):
pass
else:
obj_in_want = search_obj_in_list(h['name'], want)
if not obj_in_want:
commands.append('no vrf context {0}'.format(h['name']))
return commands
def validate_vrf(name, module):
if name == 'default':
module.fail_json(msg='cannot use default as name of a VRF')
elif len(name) > 32:
module.fail_json(msg='VRF name exceeded max length of 32', name=name)
else:
return name
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
d['name'] = validate_vrf(d['name'], module)
obj.append(d)
else:
obj.append({
'name': validate_vrf(module.params['name'], module),
'description': module.params['description'],
'vni': module.params['vni'],
'rd': module.params['rd'],
'admin_state': module.params['admin_state'],
'state': module.params['state'],
'interfaces': module.params['interfaces'],
'associated_interfaces': module.params['associated_interfaces']
})
return obj
def get_value(arg, config, module):
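    # Extract the value that follows 'arg' in the running-config section,
    # returning an empty string when the argument is not present.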
extra_arg_regex = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(arg), re.M)
value = ''
if arg in config:
value = extra_arg_regex.search(config).group('value')
return value
def map_config_to_obj(want, element_spec, module):
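    # Query the device for each wanted VRF and build the 'have' objects:
    # admin state, vni/rd/description from the running config, and the list
    # of member interfaces.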
objs = list()
for w in want:
obj = deepcopy(element_spec)
del obj['delay']
del obj['state']
command = 'show vrf {0}'.format(w['name'])
try:
body = execute_show_command(command, module)[0]
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (TypeError, IndexError):
return list()
name = vrf_table['vrf_name']
obj['name'] = name
obj['admin_state'] = vrf_table['vrf_state'].lower()
command = 'show run all | section vrf.context.{0}'.format(name)
body = execute_show_command(command, module)[0]
extra_params = ['vni', 'rd', 'description']
for param in extra_params:
obj[param] = get_value(param, body, module)
obj['interfaces'] = []
command = 'show vrf {0} interface'.format(name)
try:
body = execute_show_command(command, module)[0]
vrf_int = body['TABLE_if']['ROW_if']
except (TypeError, IndexError):
vrf_int = None
if vrf_int:
if isinstance(vrf_int, list):
for i in vrf_int:
intf = i['if_name']
obj['interfaces'].append(intf)
elif isinstance(vrf_int, dict):
intf = vrf_int['if_name']
obj['interfaces'].append(intf)
objs.append(obj)
return objs
def check_declarative_intent_params(want, module, element_spec, result):
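    # After a change, sleep for 'delay' seconds and then verify that every
    # interface listed in associated_interfaces is actually bound to the VRF.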
have = None
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(want, element_spec, module)
for i in w['associated_interfaces']:
obj_in_have = search_obj_in_list(w['name'], have)
if obj_in_have:
interfaces = obj_in_have.get('interfaces')
if interfaces is not None and i not in interfaces:
module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name']))
def vrf_error_check(module, commands, responses):
"""Checks for VRF config errors and executes a retry in some circumstances.
"""
pattern = 'ERROR: Deletion of VRF .* in progress'
if re.search(pattern, str(responses)):
# Allow delay/retry for VRF changes
time.sleep(15)
responses = load_config(module, commands, opts={'catch_clierror': True})
if re.search(pattern, str(responses)):
module.fail_json(msg='VRF config (and retry) failure: %s ' % responses)
module.warn('VRF config delayed by VRF deletion - passed on retry')
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(type='str', aliases=['vrf']),
description=dict(type='str'),
vni=dict(type='str'),
rd=dict(type='str'),
admin_state=dict(type='str', default='up', choices=['up', 'down']),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(type='int', default=10),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(type='bool', default=False),
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(want, element_spec, module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands and not module.check_mode:
responses = load_config(module, commands, opts={'catch_clierror': True})
vrf_error_check(module, commands, responses)
result['changed'] = True
check_declarative_intent_params(want, module, element_spec, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,767,702,643,672,515,000 | 32.743396 | 103 | 0.566372 | false |
sonaht/ansible | lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 57 | 11596 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
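    # Create the CloudWatch alarm if it does not exist yet; otherwise compare
    # each attribute against the module parameters and push an update only
    # when something has changed.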
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
                changed = True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
if set(getattr(alarm, attr)) != set(action):
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError as e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
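    # Delete the named alarm if it exists; report changed=False otherwise.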
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 7,852,049,397,230,085,000 | 34.461774 | 156 | 0.601673 | false |
murraymeehan/marsyas | scripts/Python/batch.py | 7 | 1475 | import os
from glob import glob
inputDirectory = "../../../../Databases/taslp/";
outputDirectory = "../../../output3 ";
testCommand = " ";
#testCommand = " -q 1 ";
beginCommand = "../../bin/release/peakClustering ";
beginCommand = "..\\..\\bin\\release\\peakClustering.exe ";
endCommand = " -P -f -S 0 -r -k 2 -c 3 -N music -i 250_2500 -o "+outputDirectory;
execStyle=[
#hwps
"-T 1 -s 20 -t hoabfb ",
"-T 10 -s 20 -t hoabfb ",
"-T 1 -s 20 -t hoabfb -u ",
"-T 10 -s 20 -t hoabfb -u ",
#virtanen
"-T 1 -s 20 -t voabfb ",
"-T 10 -s 20 -t voabfb ",
"-T 1 -s 20 -t voabfb -u ",
"-T 10 -s 20 -t voabfb -u ",
#srinivasan criterion
"-T 1 -s 20 -t soabfb ",
"-T 10 -s 20 -t soabfb ",
"-T 1 -s 20 -t soabfb -u ",
"-T 10 -s 20 -t soabfb -u ",
# amplitude only
"-T 1 -s 20 -t abfb ",
"-T 1 -s 20 -t abfb -u ",
# harmonicity only
"-T 1 -s 20 -t ho ",
"-T 1 -s 20 -t ho -u ",
"-T 1 -s 20 -t vo ",
"-T 1 -s 20 -t vo -u ",
"-T 1 -s 20 -t so ",
"-T 1 -s 20 -t so -u ",
# srinivasan algo
" -s 1024 -npp -u -T 1 -t soabfb ",
"-s 1024 -npp -u -T 10 -t soabfb "];
for style in execStyle:
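    # Run peakClustering over every matching wav file with this parameter style.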
for name in glob(inputDirectory+"*V*.wav"):
command = beginCommand+style+testCommand+endCommand+name
print command
os.system(command)
| gpl-2.0 | -3,598,926,552,755,465,700 | 28.102041 | 81 | 0.472542 | false |
sathiamour/foursquared | util/oget.py | 262 | 3416 | #!/usr/bin/python
"""
Pull an OAuth-protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
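  # Pull the oauth_token and oauth_token_secret values out of the XML body
  # returned by the authexchange endpoint.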
return (
re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0],
re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
auth_response).groups()[0]
)
def create_signed_oauth_request(username, password, consumer):
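  # Build an OAuth request for the authexchange endpoint carrying the
  # username/password parameters and sign it with HMAC-SHA1.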
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
parameters=dict(fs_username=username, fs_password=password))
oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
return oauth_request
def main():
url = urlparse.urlparse(sys.argv[1])
# Nevermind that the query can have repeated keys.
parameters = dict(urlparse.parse_qsl(url.query))
password_file = open(os.path.join(user.home, '.oget'))
lines = [line.strip() for line in password_file.readlines()]
if len(lines) == 4:
cons_key, cons_key_secret, username, password = lines
access_token = None
else:
cons_key, cons_key_secret, username, password, token, secret = lines
access_token = oauth.OAuthToken(token, secret)
consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
if not access_token:
oauth_request = create_signed_oauth_request(username, password, consumer)
connection = httplib.HTTPConnection(SERVER)
headers = {'Content-Type' :'application/x-www-form-urlencoded'}
connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
body=oauth_request.to_postdata(), headers=headers)
auth_response = connection.getresponse().read()
token = parse_auth_response(auth_response)
access_token = oauth.OAuthToken(*token)
open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
cons_key, cons_key_secret, username, password, token[0], token[1])))
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
access_token, http_method='POST', http_url=url.geturl(),
parameters=parameters)
oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
connection = httplib.HTTPConnection(SERVER)
connection.request(oauth_request.http_method, oauth_request.to_url(),
body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
print connection.getresponse().read()
#print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
main()
| apache-2.0 | -4,553,869,164,026,361,000 | 29.774775 | 78 | 0.695843 | false |
dmarteau/QGIS | python/plugins/db_manager/db_plugins/gpkg/sql_dictionary.py | 71 | 1200 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sql_dictionary.py
---------------------
Date : April 2012
Copyright : (C) 2012 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
def getSqlDictionary(spatial=True):
from ..spatialite.sql_dictionary import getSqlDictionary
return getSqlDictionary(spatial)
def getQueryBuilderDictionary():
from ..spatialite.sql_dictionary import getQueryBuilderDictionary
return getQueryBuilderDictionary()
| gpl-2.0 | 1,423,807,678,453,540,000 | 41.857143 | 75 | 0.431667 | false |
smourph/PGo-TrainerTools | pgoapi/protos/POGOProtos/Networking/Responses/GetIncensePokemonResponse_pb2.py | 12 | 6521 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/GetIncensePokemonResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import PokemonId_pb2 as POGOProtos_dot_Enums_dot_PokemonId__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/GetIncensePokemonResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n?POGOProtos/Networking/Responses/GetIncensePokemonResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Enums/PokemonId.proto\"\x85\x03\n\x19GetIncensePokemonResponse\x12Q\n\x06result\x18\x01 \x01(\x0e\x32\x41.POGOProtos.Networking.Responses.GetIncensePokemonResponse.Result\x12/\n\npokemon_id\x18\x02 \x01(\x0e\x32\x1b.POGOProtos.Enums.PokemonId\x12\x10\n\x08latitude\x18\x03 \x01(\x01\x12\x11\n\tlongitude\x18\x04 \x01(\x01\x12\x1a\n\x12\x65ncounter_location\x18\x05 \x01(\t\x12\x14\n\x0c\x65ncounter_id\x18\x06 \x01(\x06\x12\x1e\n\x16\x64isappear_timestamp_ms\x18\x07 \x01(\x03\"m\n\x06Result\x12\x1d\n\x19INCENSE_ENCOUNTER_UNKNOWN\x10\x00\x12\x1f\n\x1bINCENSE_ENCOUNTER_AVAILABLE\x10\x01\x12#\n\x1fINCENSE_ENCOUNTER_NOT_AVAILABLE\x10\x02\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_PokemonId__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETINCENSEPOKEMONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INCENSE_ENCOUNTER_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCENSE_ENCOUNTER_AVAILABLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCENSE_ENCOUNTER_NOT_AVAILABLE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=415,
serialized_end=524,
)
_sym_db.RegisterEnumDescriptor(_GETINCENSEPOKEMONRESPONSE_RESULT)
_GETINCENSEPOKEMONRESPONSE = _descriptor.Descriptor(
name='GetIncensePokemonResponse',
full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.pokemon_id', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='latitude', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.latitude', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='longitude', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.longitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encounter_location', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.encounter_location', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encounter_id', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.encounter_id', index=5,
number=6, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='disappear_timestamp_ms', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.disappear_timestamp_ms', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETINCENSEPOKEMONRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=524,
)
_GETINCENSEPOKEMONRESPONSE.fields_by_name['result'].enum_type = _GETINCENSEPOKEMONRESPONSE_RESULT
_GETINCENSEPOKEMONRESPONSE.fields_by_name['pokemon_id'].enum_type = POGOProtos_dot_Enums_dot_PokemonId__pb2._POKEMONID
_GETINCENSEPOKEMONRESPONSE_RESULT.containing_type = _GETINCENSEPOKEMONRESPONSE
DESCRIPTOR.message_types_by_name['GetIncensePokemonResponse'] = _GETINCENSEPOKEMONRESPONSE
GetIncensePokemonResponse = _reflection.GeneratedProtocolMessageType('GetIncensePokemonResponse', (_message.Message,), dict(
DESCRIPTOR = _GETINCENSEPOKEMONRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.GetIncensePokemonResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.GetIncensePokemonResponse)
))
_sym_db.RegisterMessage(GetIncensePokemonResponse)
# @@protoc_insertion_point(module_scope)
| gpl-3.0 | 8,084,210,095,682,330,000 | 44.601399 | 794 | 0.750038 | false |
renegelinas/mi-instrument | mi/idk/platform/test/test_metadata.py | 11 | 2605 | #!/usr/bin/env python
"""
@package mi.idk.platform.test.test_metadata
@file mi.idk/platform/test/test_metadata.py
@author Bill French
@brief test metadata object
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
from os.path import basename, dirname
from os import makedirs
from os.path import exists
import sys
from nose.plugins.attrib import attr
from mock import Mock
import unittest
from mi.core.unit_test import MiUnitTest
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.platform.metadata import Metadata
from mi.idk.exceptions import InvalidParameters
import os
BASE_DIR = "/tmp"
DRIVER_PATH = "test_driver/foo"
METADATA_DIR = "/tmp/mi/platform/driver/test_driver/foo"
METADATA_FILE = "metadata.yml"
@attr('UNIT', group='mi')
class TestMetadata(MiUnitTest):
"""
Test the metadata object
"""
def setUp(self):
"""
Setup the test case
"""
self.createMetadataFile()
    def createMetadataFile(self):
        """
        Create a metadata.yml fixture for the test driver under METADATA_DIR.
        """
self.addCleanup(self.removeMetadataFile)
if(not exists(METADATA_DIR)):
os.makedirs(METADATA_DIR)
md_file = open("%s/%s" % (METADATA_DIR, METADATA_FILE), 'w')
md_file.write("driver_metadata:\n")
md_file.write(" author: Bill French\n")
md_file.write(" driver_path: test_driver/foo\n")
md_file.write(" driver_name: test_driver_foo\n")
md_file.write(" email: [email protected]\n")
md_file.write(" release_notes: some note\n")
md_file.write(" version: 0.2.2\n")
md_file.close()
def removeMetadataFile(self):
filename = "%s/%s" % (METADATA_DIR, METADATA_FILE)
if(exists(filename)):
pass
#os.unlink(filename)
def test_constructor(self):
"""
Test object creation
"""
default_metadata = Metadata()
self.assertTrue(default_metadata)
specific_metadata = Metadata(DRIVER_PATH, BASE_DIR)
self.assertTrue(specific_metadata)
self.assertTrue(os.path.isfile(specific_metadata.metadata_path()), msg="file doesn't exist: %s" % specific_metadata.metadata_path())
self.assertEqual(specific_metadata.driver_path, "test_driver/foo")
self.assertEqual(specific_metadata.driver_name, "test_driver_foo")
self.assertEqual(specific_metadata.author, "Bill French")
self.assertEqual(specific_metadata.email, "[email protected]")
self.assertEqual(specific_metadata.notes, "some note")
self.assertEqual(specific_metadata.version, "0.2.2")
| bsd-2-clause | 9,126,583,008,312,784,000 | 27.626374 | 140 | 0.646449 | false |
pgoeser/gnuradio | gr-howto-write-a-block/apps/howto_square.py | 36 | 2164 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Howto Square
# Generated: Thu Nov 12 11:26:07 2009
##################################################
import howto
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from gnuradio.wxgui import scopesink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class howto_square(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Howto Square")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 10e3
##################################################
# Blocks
##################################################
self.sink = scopesink2.scope_sink_f(
self.GetWin(),
title="Input",
sample_rate=samp_rate,
v_scale=20,
v_offset=0,
t_scale=0.002,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
self.Add(self.sink.win)
self.sink2 = scopesink2.scope_sink_f(
self.GetWin(),
title="Output",
sample_rate=samp_rate,
v_scale=0,
v_offset=0,
t_scale=0.002,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
self.Add(self.sink2.win)
self.sqr = howto.square_ff()
self.src = gr.vector_source_f(([float(n)-50 for n in range(100)]), True, 1)
self.thr = gr.throttle(gr.sizeof_float*1, samp_rate)
##################################################
# Connections
##################################################
self.connect((self.thr, 0), (self.sqr, 0))
self.connect((self.src, 0), (self.thr, 0))
self.connect((self.thr, 0), (self.sink, 0))
self.connect((self.sqr, 0), (self.sink2, 0))
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.sink.set_sample_rate(self.samp_rate)
self.sink2.set_sample_rate(self.samp_rate)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = howto_square()
tb.Run(True)
| gpl-3.0 | 3,557,765,258,694,196,000 | 27.103896 | 77 | 0.546673 | false |
EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/src/twisted/test/test_paths.py | 13 | 74412 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases covering L{twisted.python.filepath}.
"""
from __future__ import division, absolute_import
import os, time, pickle, errno, stat
from pprint import pformat
from twisted.python.compat import _PY3, unicode
from twisted.python.win32 import WindowsError, ERROR_DIRECTORY
from twisted.python import filepath
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest, SynchronousTestCase as TestCase
from zope.interface.verify import verifyObject
if not platform._supportsSymlinks():
symlinkSkip = "Platform does not support symlinks"
else:
symlinkSkip = None
class BytesTestCase(TestCase):
"""
Override default method implementations to support byte paths.
"""
def mktemp(self):
"""
Return a temporary path, encoded as bytes.
"""
return TestCase.mktemp(self).encode("utf-8")
class AbstractFilePathTests(BytesTestCase):
"""
Tests for L{IFilePath} implementations.
"""
f1content = b"file 1"
f2content = b"file 2"
def _mkpath(self, *p):
x = os.path.abspath(os.path.join(self.cmn, *p))
self.all.append(x)
return x
def subdir(self, *dirname):
os.mkdir(self._mkpath(*dirname))
def subfile(self, *dirname):
return open(self._mkpath(*dirname), "wb")
def setUp(self):
self.now = time.time()
cmn = self.cmn = os.path.abspath(self.mktemp())
self.all = [cmn]
os.mkdir(cmn)
self.subdir(b"sub1")
with self.subfile(b"file1") as f:
f.write(self.f1content)
with self.subfile(b"sub1", b"file2") as f:
f.write(self.f2content)
self.subdir(b'sub3')
self.subfile(b"sub3", b"file3.ext1").close()
self.subfile(b"sub3", b"file3.ext2").close()
self.subfile(b"sub3", b"file3.ext3").close()
self.path = filepath.FilePath(cmn)
self.root = filepath.FilePath(b"/")
def test_verifyObject(self):
"""
Instances of the path type being tested provide L{IFilePath}.
"""
self.assertTrue(verifyObject(filepath.IFilePath, self.path))
def test_segmentsFromPositive(self):
"""
Verify that the segments between two paths are correctly identified.
"""
self.assertEqual(
self.path.child(b"a").child(b"b").child(b"c").segmentsFrom(self.path),
[b"a", b"b", b"c"])
def test_segmentsFromNegative(self):
"""
Verify that segmentsFrom notices when the ancestor isn't an ancestor.
"""
self.assertRaises(
ValueError,
self.path.child(b"a").child(b"b").child(b"c").segmentsFrom,
self.path.child(b"d").child(b"c").child(b"e"))
def test_walk(self):
"""
Verify that walking the path gives the same result as the known file
hierarchy.
"""
x = [foo.path for foo in self.path.walk()]
self.assertEqual(set(x), set(self.all))
def test_parents(self):
"""
L{FilePath.parents()} should return an iterator of every ancestor of
the L{FilePath} in question.
"""
L = []
pathobj = self.path.child(b"a").child(b"b").child(b"c")
fullpath = pathobj.path
lastpath = fullpath
thispath = os.path.dirname(fullpath)
while lastpath != self.root.path:
L.append(thispath)
lastpath = thispath
thispath = os.path.dirname(thispath)
self.assertEqual([x.path for x in pathobj.parents()], L)
def test_validSubdir(self):
"""
Verify that a valid subdirectory will show up as a directory, but not as a
file, not as a symlink, and be listable.
"""
sub1 = self.path.child(b'sub1')
self.assertTrue(sub1.exists(),
"This directory does exist.")
self.assertTrue(sub1.isdir(),
"It's a directory.")
self.assertFalse(sub1.isfile(),
"It's a directory.")
self.assertFalse(sub1.islink(),
"It's a directory.")
self.assertEqual(sub1.listdir(),
[b'file2'])
def test_invalidSubdir(self):
"""
Verify that a subdirectory that doesn't exist is reported as such.
"""
sub2 = self.path.child(b'sub2')
self.assertFalse(sub2.exists(),
"This directory does not exist.")
def test_validFiles(self):
"""
Make sure that we can read existent non-empty files.
"""
f1 = self.path.child(b'file1')
with f1.open() as f:
self.assertEqual(f.read(), self.f1content)
f2 = self.path.child(b'sub1').child(b'file2')
with f2.open() as f:
self.assertEqual(f.read(), self.f2content)
def test_multipleChildSegments(self):
"""
C{fp.descendant([a, b, c])} returns the same L{FilePath} as is returned
by C{fp.child(a).child(b).child(c)}.
"""
multiple = self.path.descendant([b'a', b'b', b'c'])
single = self.path.child(b'a').child(b'b').child(b'c')
self.assertEqual(multiple, single)
def test_dictionaryKeys(self):
"""
Verify that path instances are usable as dictionary keys.
"""
f1 = self.path.child(b'file1')
f1prime = self.path.child(b'file1')
f2 = self.path.child(b'file2')
dictoid = {}
dictoid[f1] = 3
dictoid[f1prime] = 4
self.assertEqual(dictoid[f1], 4)
self.assertEqual(list(dictoid.keys()), [f1])
self.assertIs(list(dictoid.keys())[0], f1)
self.assertIsNot(list(dictoid.keys())[0], f1prime) # sanity check
dictoid[f2] = 5
self.assertEqual(dictoid[f2], 5)
self.assertEqual(len(dictoid), 2)
def test_dictionaryKeyWithString(self):
"""
Verify that path instances are usable as dictionary keys which do not clash
with their string counterparts.
"""
f1 = self.path.child(b'file1')
dictoid = {f1: 'hello'}
dictoid[f1.path] = 'goodbye'
self.assertEqual(len(dictoid), 2)
def test_childrenNonexistentError(self):
"""
Verify that children raises the appropriate exception for non-existent
directories.
"""
self.assertRaises(filepath.UnlistableError,
self.path.child(b'not real').children)
def test_childrenNotDirectoryError(self):
"""
Verify that listdir raises the appropriate exception for attempting to list
a file rather than a directory.
"""
self.assertRaises(filepath.UnlistableError,
self.path.child(b'file1').children)
def test_newTimesAreFloats(self):
"""
        Verify that all times returned from the various new time functions are floats
(and hopefully therefore 'high precision').
"""
for p in self.path, self.path.child(b'file1'):
self.assertEqual(type(p.getAccessTime()), float)
self.assertEqual(type(p.getModificationTime()), float)
self.assertEqual(type(p.getStatusChangeTime()), float)
def test_oldTimesAreInts(self):
"""
Verify that all times returned from the various time functions are
integers, for compatibility.
"""
for p in self.path, self.path.child(b'file1'):
self.assertEqual(type(p.getatime()), int)
self.assertEqual(type(p.getmtime()), int)
self.assertEqual(type(p.getctime()), int)
class FakeWindowsPath(filepath.FilePath):
"""
A test version of FilePath which overrides listdir to raise L{WindowsError}.
"""
def listdir(self):
"""
@raise WindowsError: always.
"""
if _PY3:
# For Python 3.3 and higher, WindowsError is an alias for OSError.
# The first argument to the OSError constructor is errno, and the fourth
# argument is winerror.
# For further details, refer to:
# https://docs.python.org/3/library/exceptions.html#OSError
#
# On Windows, if winerror is set in the constructor,
# the errno value in the constructor is ignored, and OSError internally
# maps the winerror value to an errno value.
raise WindowsError(
None,
"A directory's validness was called into question",
self.path,
ERROR_DIRECTORY)
else:
raise WindowsError(
ERROR_DIRECTORY,
"A directory's validness was called into question")
class ListingCompatibilityTests(BytesTestCase):
"""
These tests verify compatibility with legacy behavior of directory listing.
"""
def test_windowsErrorExcept(self):
"""
Verify that when a WindowsError is raised from listdir, catching
WindowsError works.
"""
fwp = FakeWindowsPath(self.mktemp())
self.assertRaises(filepath.UnlistableError, fwp.children)
self.assertRaises(WindowsError, fwp.children)
if not platform.isWindows():
test_windowsErrorExcept.skip = "Only relevant on on Windows."
def test_alwaysCatchOSError(self):
"""
Verify that in the normal case where a directory does not exist, we will
get an OSError.
"""
fp = filepath.FilePath(self.mktemp())
self.assertRaises(OSError, fp.children)
def test_keepOriginalAttributes(self):
"""
Verify that the Unlistable exception raised will preserve the attributes of
the previously-raised exception.
"""
fp = filepath.FilePath(self.mktemp())
ose = self.assertRaises(OSError, fp.children)
d1 = list(ose.__dict__.keys())
d1.remove('originalException')
d2 = list(ose.originalException.__dict__.keys())
d1.sort()
d2.sort()
self.assertEqual(d1, d2)
class ExplodingFile:
"""
A C{file}-alike which raises exceptions from its I/O methods and keeps track
of whether it has been closed.
@ivar closed: A C{bool} which is C{False} until C{close} is called, then it
is C{True}.
"""
closed = False
def read(self, n=0):
"""
@raise IOError: Always raised.
"""
raise IOError()
def write(self, what):
"""
@raise IOError: Always raised.
"""
raise IOError()
def close(self):
"""
Mark the file as having been closed.
"""
self.closed = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class TrackingFilePath(filepath.FilePath):
"""
A subclass of L{filepath.FilePath} which maintains a list of all other paths
created by clonePath.
@ivar trackingList: A list of all paths created by this path via
C{clonePath} (which also includes paths created by methods like
C{parent}, C{sibling}, C{child}, etc (and all paths subsequently created
by those paths, etc).
@type trackingList: C{list} of L{TrackingFilePath}
@ivar openedFiles: A list of all file objects opened by this
L{TrackingFilePath} or any other L{TrackingFilePath} in C{trackingList}.
@type openedFiles: C{list} of C{file}
"""
def __init__(self, path, alwaysCreate=False, trackingList=None):
filepath.FilePath.__init__(self, path, alwaysCreate)
if trackingList is None:
trackingList = []
self.trackingList = trackingList
self.openedFiles = []
def open(self, *a, **k):
"""
Override 'open' to track all files opened by this path.
"""
f = filepath.FilePath.open(self, *a, **k)
self.openedFiles.append(f)
return f
def openedPaths(self):
"""
Return a list of all L{TrackingFilePath}s associated with this
L{TrackingFilePath} that have had their C{open()} method called.
"""
return [path for path in self.trackingList if path.openedFiles]
def clonePath(self, name):
"""
Override L{filepath.FilePath.clonePath} to give the new path a reference
to the same tracking list.
"""
clone = TrackingFilePath(name, trackingList=self.trackingList)
self.trackingList.append(clone)
return clone
class ExplodingFilePath(filepath.FilePath):
"""
A specialized L{FilePath} which always returns an instance of
L{ExplodingFile} from its C{open} method.
@ivar fp: The L{ExplodingFile} instance most recently returned from the
C{open} method.
"""
def __init__(self, pathName, originalExploder=None):
"""
        Initialize an L{ExplodingFilePath} with a name and a reference to the
        original L{ExplodingFilePath} that opened files should be associated with.
@param pathName: The path name as passed to L{filepath.FilePath}.
@type pathName: C{str}
@param originalExploder: The L{ExplodingFilePath} to associate opened
files with.
@type originalExploder: L{ExplodingFilePath}
"""
filepath.FilePath.__init__(self, pathName)
if originalExploder is None:
originalExploder = self
self._originalExploder = originalExploder
def open(self, mode=None):
"""
Create, save, and return a new C{ExplodingFile}.
@param mode: Present for signature compatibility. Ignored.
@return: A new C{ExplodingFile}.
"""
f = self._originalExploder.fp = ExplodingFile()
return f
def clonePath(self, name):
return ExplodingFilePath(name, self._originalExploder)
class PermissionsTests(BytesTestCase):
"""
Test Permissions and RWX classes
"""
def assertNotUnequal(self, first, second, msg=None):
"""
Tests that C{first} != C{second} is false. This method tests the
__ne__ method, as opposed to L{assertEqual} (C{first} == C{second}),
which tests the __eq__ method.
Note: this should really be part of trial
"""
if first != second:
if msg is None:
                msg = ''
if len(msg) > 0:
msg += '\n'
raise self.failureException(
'%snot not unequal (__ne__ not implemented correctly):'
'\na = %s\nb = %s\n'
% (msg, pformat(first), pformat(second)))
return first
def test_rwxFromBools(self):
"""
L{RWX}'s constructor takes a set of booleans
"""
for r in (True, False):
for w in (True, False):
for x in (True, False):
rwx = filepath.RWX(r, w, x)
self.assertEqual(rwx.read, r)
self.assertEqual(rwx.write, w)
self.assertEqual(rwx.execute, x)
rwx = filepath.RWX(True, True, True)
self.assertTrue(rwx.read and rwx.write and rwx.execute)
def test_rwxEqNe(self):
"""
L{RWX}'s created with the same booleans are equivalent. If booleans
are different, they are not equal.
"""
for r in (True, False):
for w in (True, False):
for x in (True, False):
self.assertEqual(filepath.RWX(r, w, x),
filepath.RWX(r, w, x))
self.assertNotUnequal(filepath.RWX(r, w, x),
filepath.RWX(r, w, x))
self.assertNotEqual(filepath.RWX(True, True, True),
filepath.RWX(True, True, False))
self.assertNotEqual(3, filepath.RWX(True, True, True))
def test_rwxShorthand(self):
"""
L{RWX}'s shorthand string should be 'rwx' if read, write, and execute
permission bits are true. If any of those permissions bits are false,
the character is replaced by a '-'.
"""
def getChar(val, letter):
if val:
return letter
return '-'
for r in (True, False):
for w in (True, False):
for x in (True, False):
rwx = filepath.RWX(r, w, x)
self.assertEqual(rwx.shorthand(),
getChar(r, 'r') +
getChar(w, 'w') +
getChar(x, 'x'))
self.assertEqual(filepath.RWX(True, False, True).shorthand(), "r-x")
def test_permissionsFromStat(self):
"""
L{Permissions}'s constructor takes a valid permissions bitmask and
        parses it to produce the correct set of boolean permissions.
"""
def _rwxFromStat(statModeInt, who):
def getPermissionBit(what, who):
return (statModeInt &
getattr(stat, "S_I%s%s" % (what, who))) > 0
return filepath.RWX(*[getPermissionBit(what, who) for what in
('R', 'W', 'X')])
for u in range(0, 8):
for g in range(0, 8):
for o in range(0, 8):
chmodString = "%d%d%d" % (u, g, o)
chmodVal = int(chmodString, 8)
perm = filepath.Permissions(chmodVal)
self.assertEqual(perm.user,
_rwxFromStat(chmodVal, "USR"),
"%s: got user: %s" %
(chmodString, perm.user))
self.assertEqual(perm.group,
_rwxFromStat(chmodVal, "GRP"),
"%s: got group: %s" %
(chmodString, perm.group))
self.assertEqual(perm.other,
_rwxFromStat(chmodVal, "OTH"),
"%s: got other: %s" %
(chmodString, perm.other))
perm = filepath.Permissions(0o777)
for who in ("user", "group", "other"):
for what in ("read", "write", "execute"):
self.assertTrue(getattr(getattr(perm, who), what))
def test_permissionsEq(self):
"""
        Two L{Permissions} instances that are created with the same bitmask
        are equivalent
"""
self.assertEqual(filepath.Permissions(0o777),
filepath.Permissions(0o777))
self.assertNotUnequal(filepath.Permissions(0o777),
filepath.Permissions(0o777))
self.assertNotEqual(filepath.Permissions(0o777),
filepath.Permissions(0o700))
self.assertNotEqual(3, filepath.Permissions(0o777))
def test_permissionsShorthand(self):
"""
L{Permissions}'s shorthand string is the RWX shorthand string for its
user permission bits, group permission bits, and other permission bits
concatenated together, without a space.
"""
for u in range(0, 8):
for g in range(0, 8):
for o in range(0, 8):
perm = filepath.Permissions(int("0o%d%d%d" % (u, g, o), 8))
self.assertEqual(perm.shorthand(),
''.join(x.shorthand() for x in (
perm.user, perm.group, perm.other)))
self.assertEqual(filepath.Permissions(0o770).shorthand(), "rwxrwx---")
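# Editor's illustration (not part of the original suite): a minimal sketch of
# the Permissions/RWX API exercised by the tests above, relying on the
# module's existing ``filepath`` import; the helper is defined but never run.
def _permissionsUsageSketch():
    perm = filepath.Permissions(0o754)        # user=rwx, group=r-x, other=r--
    assert perm.user.read and perm.user.write and perm.user.execute
    assert perm.group.execute and not perm.group.write
    assert not perm.other.write
    assert perm.shorthand() == "rwxr-xr--"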
class FilePathTests(AbstractFilePathTests):
"""
Test various L{FilePath} path manipulations.
In particular, note that tests defined on this class instead of on the base
class are only run against L{twisted.python.filepath}.
"""
def test_chmod(self):
"""
L{FilePath.chmod} modifies the permissions of
the passed file as expected (using C{os.stat} to check). We use some
basic modes that should work everywhere (even on Windows).
"""
for mode in (0o555, 0o777):
self.path.child(b"sub1").chmod(mode)
self.assertEqual(
stat.S_IMODE(os.stat(self.path.child(b"sub1").path).st_mode),
mode)
def symlink(self, target, name):
"""
Create a symbolic link named C{name} pointing at C{target}.
@type target: C{str}
@type name: C{str}
@raise SkipTest: raised if symbolic links are not supported on the
host platform.
"""
if symlinkSkip:
raise SkipTest(symlinkSkip)
os.symlink(target, name)
def createLinks(self):
"""
Create several symbolic links to files and directories.
"""
subdir = self.path.child(b"sub1")
self.symlink(subdir.path, self._mkpath(b"sub1.link"))
self.symlink(subdir.child(b"file2").path, self._mkpath(b"file2.link"))
self.symlink(subdir.child(b"file2").path,
self._mkpath(b"sub1", b"sub1.file2.link"))
def test_realpathSymlink(self):
"""
L{FilePath.realpath} returns the path of the ultimate target of a
symlink.
"""
self.createLinks()
self.symlink(self.path.child(b"file2.link").path,
self.path.child(b"link.link").path)
self.assertEqual(self.path.child(b"link.link").realpath(),
self.path.child(b"sub1").child(b"file2"))
def test_realpathCyclicalSymlink(self):
"""
L{FilePath.realpath} raises L{filepath.LinkError} if the path is a
symbolic link which is part of a cycle.
"""
self.symlink(self.path.child(b"link1").path, self.path.child(b"link2").path)
self.symlink(self.path.child(b"link2").path, self.path.child(b"link1").path)
self.assertRaises(filepath.LinkError,
self.path.child(b"link2").realpath)
def test_realpathNoSymlink(self):
"""
L{FilePath.realpath} returns the path itself if the path is not a
symbolic link.
"""
self.assertEqual(self.path.child(b"sub1").realpath(),
self.path.child(b"sub1"))
def test_walkCyclicalSymlink(self):
"""
Verify that walking a path with a cyclical symlink raises an error
"""
self.createLinks()
self.symlink(self.path.child(b"sub1").path,
self.path.child(b"sub1").child(b"sub1.loopylink").path)
def iterateOverPath():
return [foo.path for foo in self.path.walk()]
self.assertRaises(filepath.LinkError, iterateOverPath)
def test_walkObeysDescendWithCyclicalSymlinks(self):
"""
Verify that, after making a path with cyclical symlinks, when the
supplied C{descend} predicate returns C{False}, the target is not
traversed, as if it was a simple symlink.
"""
self.createLinks()
# we create cyclical symlinks
self.symlink(self.path.child(b"sub1").path,
self.path.child(b"sub1").child(b"sub1.loopylink").path)
def noSymLinks(path):
return not path.islink()
def iterateOverPath():
return [foo.path for foo in self.path.walk(descend=noSymLinks)]
self.assertTrue(iterateOverPath())
def test_walkObeysDescend(self):
"""
Verify that when the supplied C{descend} predicate returns C{False},
the target is not traversed.
"""
self.createLinks()
def noSymLinks(path):
return not path.islink()
x = [foo.path for foo in self.path.walk(descend=noSymLinks)]
self.assertEqual(set(x), set(self.all))
def test_getAndSet(self):
content = b'newcontent'
self.path.child(b'new').setContent(content)
newcontent = self.path.child(b'new').getContent()
self.assertEqual(content, newcontent)
content = b'content'
self.path.child(b'new').setContent(content, b'.tmp')
newcontent = self.path.child(b'new').getContent()
self.assertEqual(content, newcontent)
def test_getContentFileClosing(self):
"""
If reading from the underlying file raises an exception,
L{FilePath.getContent} raises that exception after closing the file.
"""
fp = ExplodingFilePath(b"")
self.assertRaises(IOError, fp.getContent)
self.assertTrue(fp.fp.closed)
def test_symbolicLink(self):
"""
        Verify the behavior of the C{islink} method against links and
non-links. Also check that the symbolic link shares the directory
property with its target.
"""
s4 = self.path.child(b"sub4")
s3 = self.path.child(b"sub3")
self.symlink(s3.path, s4.path)
self.assertTrue(s4.islink())
self.assertFalse(s3.islink())
self.assertTrue(s4.isdir())
self.assertTrue(s3.isdir())
def test_linkTo(self):
"""
Verify that symlink creates a valid symlink that is both a link and a
file if its target is a file, or a directory if its target is a
directory.
"""
targetLinks = [
(self.path.child(b"sub2"), self.path.child(b"sub2.link")),
(self.path.child(b"sub2").child(b"file3.ext1"),
self.path.child(b"file3.ext1.link"))
]
for target, link in targetLinks:
target.linkTo(link)
self.assertTrue(link.islink(), "This is a link")
self.assertEqual(target.isdir(), link.isdir())
self.assertEqual(target.isfile(), link.isfile())
def test_linkToErrors(self):
"""
        Verify C{linkTo} fails in the following cases:
- the target is in a directory that doesn't exist
- the target already exists
"""
self.assertRaises(OSError, self.path.child(b"file1").linkTo,
self.path.child(b'nosub').child(b'file1'))
self.assertRaises(OSError, self.path.child(b"file1").linkTo,
self.path.child(b'sub1').child(b'file2'))
if symlinkSkip:
test_symbolicLink.skip = symlinkSkip
test_linkTo.skip = symlinkSkip
test_linkToErrors.skip = symlinkSkip
def testMultiExt(self):
f3 = self.path.child(b'sub3').child(b'file3')
exts = b'.foo', b'.bar', b'ext1', b'ext2', b'ext3'
self.assertFalse(f3.siblingExtensionSearch(*exts))
f3e = f3.siblingExtension(b".foo")
f3e.touch()
        self.assertTrue(f3.siblingExtensionSearch(*exts).exists())
        self.assertTrue(f3.siblingExtensionSearch(b'*').exists())
f3e.remove()
self.assertFalse(f3.siblingExtensionSearch(*exts))
def testPreauthChild(self):
fp = filepath.FilePath(b'.')
fp.preauthChild(b'foo/bar')
self.assertRaises(filepath.InsecurePath, fp.child, u'/mon\u20acy')
def testStatCache(self):
p = self.path.child(b'stattest')
p.touch()
self.assertEqual(p.getsize(), 0)
self.assertEqual(abs(p.getmtime() - time.time()) // 20, 0)
self.assertEqual(abs(p.getctime() - time.time()) // 20, 0)
self.assertEqual(abs(p.getatime() - time.time()) // 20, 0)
self.assertTrue(p.exists())
self.assertTrue(p.exists())
# OOB removal: FilePath.remove() will automatically restat
os.remove(p.path)
# test caching
self.assertTrue(p.exists())
p.restat(reraise=False)
self.assertFalse(p.exists())
self.assertFalse(p.islink())
self.assertFalse(p.isdir())
self.assertFalse(p.isfile())
def testPersist(self):
newpath = pickle.loads(pickle.dumps(self.path))
self.assertEqual(self.path.__class__, newpath.__class__)
self.assertEqual(self.path.path, newpath.path)
def testInsecureUNIX(self):
self.assertRaises(filepath.InsecurePath, self.path.child, b"..")
self.assertRaises(filepath.InsecurePath, self.path.child, b"/etc")
self.assertRaises(filepath.InsecurePath, self.path.child, b"../..")
def testInsecureWin32(self):
self.assertRaises(filepath.InsecurePath, self.path.child, b"..\\..")
self.assertRaises(filepath.InsecurePath, self.path.child, b"C:randomfile")
if platform.getType() != 'win32':
testInsecureWin32.skip = "Test will run only on Windows."
def testInsecureWin32Whacky(self):
"""
Windows has 'special' filenames like NUL and CON and COM1 and LPR
and PRN and ... god knows what else. They can be located anywhere in
the filesystem. For obvious reasons, we do not wish to normally permit
access to these.
"""
self.assertRaises(filepath.InsecurePath, self.path.child, b"CON")
self.assertRaises(filepath.InsecurePath, self.path.child, b"C:CON")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:\CON")
if platform.getType() != 'win32':
testInsecureWin32Whacky.skip = "Test will run only on Windows."
def testComparison(self):
self.assertEqual(filepath.FilePath(b'a'),
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'z') >
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'z') >=
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'a') >=
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'a') <=
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'a') <
filepath.FilePath(b'z'))
self.assertTrue(filepath.FilePath(b'a') <=
filepath.FilePath(b'z'))
self.assertTrue(filepath.FilePath(b'a') !=
filepath.FilePath(b'z'))
self.assertTrue(filepath.FilePath(b'z') !=
filepath.FilePath(b'a'))
self.assertFalse(filepath.FilePath(b'z') !=
filepath.FilePath(b'z'))
def test_descendantOnly(self):
"""
If C{".."} is in the sequence passed to L{FilePath.descendant},
L{InsecurePath} is raised.
"""
self.assertRaises(
filepath.InsecurePath,
self.path.descendant, [u'mon\u20acy', u'..'])
def testSibling(self):
p = self.path.child(b'sibling_start')
ts = p.sibling(b'sibling_test')
self.assertEqual(ts.dirname(), p.dirname())
self.assertEqual(ts.basename(), b'sibling_test')
ts.createDirectory()
self.assertIn(ts, self.path.children())
def testTemporarySibling(self):
ts = self.path.temporarySibling()
self.assertEqual(ts.dirname(), self.path.dirname())
self.assertNotIn(ts.basename(), self.path.listdir())
ts.createDirectory()
self.assertIn(ts, self.path.parent().children())
def test_temporarySiblingExtension(self):
"""
If L{FilePath.temporarySibling} is given an extension argument, it will
produce path objects with that extension appended to their names.
"""
testExtension = b".test-extension"
ts = self.path.temporarySibling(testExtension)
self.assertTrue(ts.basename().endswith(testExtension),
"%s does not end with %s" % (
ts.basename(), testExtension))
def test_removeDirectory(self):
"""
L{FilePath.remove} on a L{FilePath} that refers to a directory will
recursively delete its contents.
"""
self.path.remove()
self.assertFalse(self.path.exists())
def test_removeWithSymlink(self):
"""
For a path which is a symbolic link, L{FilePath.remove} just deletes
the link, not the target.
"""
link = self.path.child(b"sub1.link")
# setUp creates the sub1 child
self.symlink(self.path.child(b"sub1").path, link.path)
link.remove()
self.assertFalse(link.exists())
self.assertTrue(self.path.child(b"sub1").exists())
def test_copyToDirectory(self):
"""
L{FilePath.copyTo} makes a copy of all the contents of the directory
named by that L{FilePath} if it is able to do so.
"""
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.path.remove()
fp.copyTo(self.path)
newPaths = list(self.path.walk()) # Record double-copy state
newPaths.sort()
oldPaths.sort()
self.assertEqual(newPaths, oldPaths)
def test_copyToMissingDestFileClosing(self):
"""
If an exception is raised while L{FilePath.copyTo} is trying to open
        the source file to read from, the destination file is closed and the
exception is raised to the caller of L{FilePath.copyTo}.
"""
nosuch = self.path.child(b"nothere")
# Make it look like something to copy, even though it doesn't exist.
# This could happen if the file is deleted between the isfile check and
# the file actually being opened.
nosuch.isfile = lambda: True
# We won't get as far as writing to this file, but it's still useful for
# tracking whether we closed it.
destination = ExplodingFilePath(self.mktemp())
self.assertRaises(IOError, nosuch.copyTo, destination)
self.assertTrue(destination.fp.closed)
def test_copyToFileClosing(self):
"""
If an exception is raised while L{FilePath.copyTo} is copying bytes
between two regular files, the source and destination files are closed
and the exception propagates to the caller of L{FilePath.copyTo}.
"""
destination = ExplodingFilePath(self.mktemp())
source = ExplodingFilePath(__file__)
self.assertRaises(IOError, source.copyTo, destination)
self.assertTrue(source.fp.closed)
self.assertTrue(destination.fp.closed)
def test_copyToDirectoryItself(self):
"""
L{FilePath.copyTo} fails with an OSError or IOError (depending on
platform, as it propagates errors from open() and write()) when
attempting to copy a directory to a child of itself.
"""
self.assertRaises((OSError, IOError),
self.path.copyTo, self.path.child(b'file1'))
def test_copyToWithSymlink(self):
"""
Verify that copying with followLinks=True copies symlink targets
instead of symlinks
"""
self.symlink(self.path.child(b"sub1").path,
self.path.child(b"link1").path)
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.assertFalse(fp.child(b"link1").islink())
self.assertEqual([x.basename() for x in fp.child(b"sub1").children()],
[x.basename() for x in fp.child(b"link1").children()])
def test_copyToWithoutSymlink(self):
"""
Verify that copying with followLinks=False copies symlinks as symlinks
"""
self.symlink(b"sub1", self.path.child(b"link1").path)
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp, followLinks=False)
self.assertTrue(fp.child(b"link1").islink())
self.assertEqual(os.readlink(self.path.child(b"link1").path),
os.readlink(fp.child(b"link1").path))
def test_copyToMissingSource(self):
"""
If the source path is missing, L{FilePath.copyTo} raises L{OSError}.
"""
path = filepath.FilePath(self.mktemp())
exc = self.assertRaises(OSError, path.copyTo, b'some other path')
self.assertEqual(exc.errno, errno.ENOENT)
def test_moveTo(self):
"""
        Verify that moving an entire directory results in another directory
with the same content.
"""
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.moveTo(fp)
fp.moveTo(self.path)
newPaths = list(self.path.walk()) # Record double-move state
newPaths.sort()
oldPaths.sort()
self.assertEqual(newPaths, oldPaths)
def test_moveToExistsCache(self):
"""
A L{FilePath} that has been moved aside with L{FilePath.moveTo} no
longer registers as existing. Its previously non-existent target
exists, though, as it was created by the call to C{moveTo}.
"""
fp = filepath.FilePath(self.mktemp())
fp2 = filepath.FilePath(self.mktemp())
fp.touch()
# Both a sanity check (make sure the file status looks right) and an
# enticement for stat-caching logic to kick in and remember that these
# exist / don't exist.
self.assertTrue(fp.exists())
self.assertFalse(fp2.exists())
fp.moveTo(fp2)
self.assertFalse(fp.exists())
self.assertTrue(fp2.exists())
def test_moveToExistsCacheCrossMount(self):
"""
The assertion of test_moveToExistsCache should hold in the case of a
cross-mount move.
"""
self.setUpFaultyRename()
self.test_moveToExistsCache()
def test_moveToSizeCache(self, hook=lambda : None):
"""
L{FilePath.moveTo} clears its destination's status cache, such that
calls to L{FilePath.getsize} after the call to C{moveTo} will report the
new size, not the old one.
This is a separate test from C{test_moveToExistsCache} because it is
intended to cover the fact that the destination's cache is dropped;
test_moveToExistsCache doesn't cover this case because (currently) a
file that doesn't exist yet does not cache the fact of its non-
existence.
"""
fp = filepath.FilePath(self.mktemp())
fp2 = filepath.FilePath(self.mktemp())
fp.setContent(b"1234")
fp2.setContent(b"1234567890")
hook()
# Sanity check / kick off caching.
self.assertEqual(fp.getsize(), 4)
self.assertEqual(fp2.getsize(), 10)
# Actually attempting to replace a file on Windows would fail with
# ERROR_ALREADY_EXISTS, but we don't need to test that, just the cached
# metadata, so, delete the file ...
os.remove(fp2.path)
# ... but don't clear the status cache, as fp2.remove() would.
self.assertEqual(fp2.getsize(), 10)
fp.moveTo(fp2)
self.assertEqual(fp2.getsize(), 4)
def test_moveToSizeCacheCrossMount(self):
"""
The assertion of test_moveToSizeCache should hold in the case of a
cross-mount move.
"""
self.test_moveToSizeCache(hook=self.setUpFaultyRename)
def test_moveToError(self):
"""
        Verify the error behavior of moveTo: it raises OSError or IOError
        when asked to move a path into one of its own children. It's simply
the error raised by the underlying rename system call.
"""
self.assertRaises((OSError, IOError), self.path.moveTo, self.path.child(b'file1'))
def setUpFaultyRename(self):
"""
Set up a C{os.rename} that will fail with L{errno.EXDEV} on first call.
This is used to simulate a cross-device rename failure.
@return: a list of pair (src, dest) of calls to C{os.rename}
@rtype: C{list} of C{tuple}
"""
invokedWith = []
def faultyRename(src, dest):
invokedWith.append((src, dest))
if len(invokedWith) == 1:
raise OSError(errno.EXDEV, 'Test-induced failure simulating '
'cross-device rename failure')
return originalRename(src, dest)
originalRename = os.rename
self.patch(os, "rename", faultyRename)
return invokedWith
def test_crossMountMoveTo(self):
"""
C{moveTo} should be able to handle C{EXDEV} error raised by
        C{os.rename} when trying to move a file onto a different mounted
filesystem.
"""
invokedWith = self.setUpFaultyRename()
# Bit of a whitebox test - force os.rename, which moveTo tries
# before falling back to a slower method, to fail, forcing moveTo to
# use the slower behavior.
self.test_moveTo()
# A bit of a sanity check for this whitebox test - if our rename
# was never invoked, the test has probably fallen into disrepair!
self.assertTrue(invokedWith)
def test_crossMountMoveToWithSymlink(self):
"""
By default, when moving a symlink, it should follow the link and
actually copy the content of the linked node.
"""
invokedWith = self.setUpFaultyRename()
f2 = self.path.child(b'file2')
f3 = self.path.child(b'file3')
self.symlink(self.path.child(b'file1').path, f2.path)
f2.moveTo(f3)
self.assertFalse(f3.islink())
self.assertEqual(f3.getContent(), b'file 1')
self.assertTrue(invokedWith)
def test_crossMountMoveToWithoutSymlink(self):
"""
        Verify that moveTo called with followLinks=False actually creates
another symlink.
"""
invokedWith = self.setUpFaultyRename()
f2 = self.path.child(b'file2')
f3 = self.path.child(b'file3')
self.symlink(self.path.child(b'file1').path, f2.path)
f2.moveTo(f3, followLinks=False)
self.assertTrue(f3.islink())
self.assertEqual(f3.getContent(), b'file 1')
self.assertTrue(invokedWith)
def test_createBinaryMode(self):
"""
L{FilePath.create} should always open (and write to) files in binary
mode; line-feed octets should be unmodified.
(While this test should pass on all platforms, it is only really
interesting on platforms which have the concept of binary mode, i.e.
Windows platforms.)
"""
path = filepath.FilePath(self.mktemp())
with path.create() as f:
self.assertIn("b", f.mode)
f.write(b"\n")
with open(path.path, "rb") as fp:
read = fp.read()
self.assertEqual(read, b"\n")
def testOpen(self):
# Opening a file for reading when it does not already exist is an error
nonexistent = self.path.child(b'nonexistent')
e = self.assertRaises(IOError, nonexistent.open)
self.assertEqual(e.errno, errno.ENOENT)
# Opening a file for writing when it does not exist is okay
writer = self.path.child(b'writer')
with writer.open('w') as f:
f.write(b'abc\ndef')
# Make sure those bytes ended up there - and test opening a file for
# reading when it does exist at the same time
with writer.open() as f:
self.assertEqual(f.read(), b'abc\ndef')
# Re-opening that file in write mode should erase whatever was there.
writer.open('w').close()
with writer.open() as f:
self.assertEqual(f.read(), b'')
# Put some bytes in a file so we can test that appending does not
# destroy them.
appender = self.path.child(b'appender')
with appender.open('w') as f:
f.write(b'abc')
with appender.open('a') as f:
f.write(b'def')
with appender.open('r') as f:
self.assertEqual(f.read(), b'abcdef')
# read/write should let us do both without erasing those bytes
with appender.open('r+') as f:
self.assertEqual(f.read(), b'abcdef')
# ANSI C *requires* an fseek or an fgetpos between an fread and an
# fwrite or an fwrite and an fread. We can't reliably get Python to
# invoke fgetpos, so we seek to a 0 byte offset from the current
# position instead. Also, Python sucks for making this seek
# relative to 1 instead of a symbolic constant representing the
# current file position.
f.seek(0, 1)
# Put in some new bytes for us to test for later.
f.write(b'ghi')
# Make sure those new bytes really showed up
with appender.open('r') as f:
self.assertEqual(f.read(), b'abcdefghi')
# write/read should let us do both, but erase anything that's there
# already.
with appender.open('w+') as f:
self.assertEqual(f.read(), b'')
f.seek(0, 1) # Don't forget this!
f.write(b'123')
# super append mode should let us read and write and also position the
# cursor at the end of the file, without erasing everything.
with appender.open('a+') as f:
# The order of these lines may seem surprising, but it is
# necessary. The cursor is not at the end of the file until after
# the first write.
f.write(b'456')
f.seek(0, 1) # Asinine.
self.assertEqual(f.read(), b'')
f.seek(0, 0)
self.assertEqual(f.read(), b'123456')
# Opening a file exclusively must fail if that file exists already.
nonexistent.requireCreate(True)
nonexistent.open('w').close()
existent = nonexistent
del nonexistent
self.assertRaises((OSError, IOError), existent.open)
def test_openWithExplicitBinaryMode(self):
"""
Due to a bug in Python 2.7 on Windows including multiple 'b'
characters in the mode passed to the built-in open() will cause an
error. FilePath.open() ensures that only a single 'b' character is
included in the mode passed to the built-in open().
See http://bugs.python.org/issue7686 for details about the bug.
"""
writer = self.path.child(b'explicit-binary')
with writer.open('wb') as file:
file.write(b'abc\ndef')
        self.assertTrue(writer.exists())
def test_openWithRedundantExplicitBinaryModes(self):
"""
Due to a bug in Python 2.7 on Windows including multiple 'b'
characters in the mode passed to the built-in open() will cause an
error. No matter how many 'b' modes are specified, FilePath.open()
ensures that only a single 'b' character is included in the mode
passed to the built-in open().
See http://bugs.python.org/issue7686 for details about the bug.
"""
writer = self.path.child(b'multiple-binary')
with writer.open('wbb') as file:
file.write(b'abc\ndef')
        self.assertTrue(writer.exists())
def test_existsCache(self):
"""
        Check that C{filepath.FilePath.exists} correctly restats the object if
        an operation has occurred in the meantime.
"""
fp = filepath.FilePath(self.mktemp())
self.assertFalse(fp.exists())
fp.makedirs()
self.assertTrue(fp.exists())
def test_makedirsMakesDirectoriesRecursively(self):
"""
        C{FilePath.makedirs} creates a directory at C{path}, including
recursively creating all parent directories leading up to the path.
"""
fp = filepath.FilePath(os.path.join(
self.mktemp(), b"foo", b"bar", b"baz"))
self.assertFalse(fp.exists())
fp.makedirs()
self.assertTrue(fp.exists())
self.assertTrue(fp.isdir())
def test_makedirsMakesDirectoriesWithIgnoreExistingDirectory(self):
"""
Calling C{FilePath.makedirs} with C{ignoreExistingDirectory} set to
        C{True} has no effect if the directory does not exist.
"""
fp = filepath.FilePath(self.mktemp())
self.assertFalse(fp.exists())
fp.makedirs(ignoreExistingDirectory=True)
self.assertTrue(fp.exists())
self.assertTrue(fp.isdir())
def test_makedirsThrowsWithExistentDirectory(self):
"""
C{FilePath.makedirs} throws an C{OSError} exception
when called on a directory that already exists.
"""
fp = filepath.FilePath(os.path.join(self.mktemp()))
fp.makedirs()
exception = self.assertRaises(OSError, fp.makedirs)
self.assertEqual(exception.errno, errno.EEXIST)
def test_makedirsAcceptsIgnoreExistingDirectory(self):
"""
C{FilePath.makedirs} succeeds when called on a directory that already
        exists and the C{ignoreExistingDirectory} argument is set to C{True}.
"""
fp = filepath.FilePath(self.mktemp())
fp.makedirs()
self.assertTrue(fp.exists())
fp.makedirs(ignoreExistingDirectory=True)
self.assertTrue(fp.exists())
def test_makedirsIgnoreExistingDirectoryExistAlreadyAFile(self):
"""
When C{FilePath.makedirs} is called with C{ignoreExistingDirectory} set
        to C{True}, it throws an C{OSError} exception if the path is a file.
"""
fp = filepath.FilePath(self.mktemp())
fp.create()
self.assertTrue(fp.isfile())
exception = self.assertRaises(
OSError, fp.makedirs, ignoreExistingDirectory=True)
self.assertEqual(exception.errno, errno.EEXIST)
def test_makedirsRaisesNonEexistErrorsIgnoreExistingDirectory(self):
"""
When C{FilePath.makedirs} is called with C{ignoreExistingDirectory} set
        to C{True}, it raises an C{OSError} exception if the exception's errno
        is not EEXIST.
"""
def faultyMakedirs(path):
raise OSError(errno.EACCES, 'Permission Denied')
self.patch(os, 'makedirs', faultyMakedirs)
fp = filepath.FilePath(self.mktemp())
exception = self.assertRaises(
OSError, fp.makedirs, ignoreExistingDirectory=True)
self.assertEqual(exception.errno, errno.EACCES)
def test_changed(self):
"""
L{FilePath.changed} indicates that the L{FilePath} has changed, but does
not re-read the status information from the filesystem until it is
queried again via another method, such as C{getsize}.
"""
fp = filepath.FilePath(self.mktemp())
fp.setContent(b"12345")
self.assertEqual(fp.getsize(), 5)
# Someone else comes along and changes the file.
with open(fp.path, 'wb') as fObj:
fObj.write(b"12345678")
# Sanity check for caching: size should still be 5.
self.assertEqual(fp.getsize(), 5)
fp.changed()
# This path should look like we don't know what status it's in, not that
# we know that it didn't exist when last we checked.
self.assertIsNone(fp.statinfo)
self.assertEqual(fp.getsize(), 8)
def test_getPermissions_POSIX(self):
"""
Getting permissions for a file returns a L{Permissions} object for
        POSIX platforms (which support separate user, group, and other
        permissions bits).
"""
for mode in (0o777, 0o700):
self.path.child(b"sub1").chmod(mode)
self.assertEqual(self.path.child(b"sub1").getPermissions(),
filepath.Permissions(mode))
self.path.child(b"sub1").chmod(0o764) #sanity check
self.assertEqual(
self.path.child(b"sub1").getPermissions().shorthand(),
"rwxrw-r--")
def test_deprecateStatinfoGetter(self):
"""
Getting L{twisted.python.filepath.FilePath.statinfo} is deprecated.
"""
fp = filepath.FilePath(self.mktemp())
fp.statinfo
warningInfo = self.flushWarnings([self.test_deprecateStatinfoGetter])
self.assertEqual(len(warningInfo), 1)
self.assertEqual(warningInfo[0]['category'], DeprecationWarning)
self.assertEqual(
warningInfo[0]['message'],
"twisted.python.filepath.FilePath.statinfo was deprecated in "
"Twisted 15.0.0; please use other FilePath methods such as "
"getsize(), isdir(), getModificationTime(), etc. instead")
def test_deprecateStatinfoSetter(self):
"""
Setting L{twisted.python.filepath.FilePath.statinfo} is deprecated.
"""
fp = filepath.FilePath(self.mktemp())
fp.statinfo = None
warningInfo = self.flushWarnings([self.test_deprecateStatinfoSetter])
self.assertEqual(len(warningInfo), 1)
self.assertEqual(warningInfo[0]['category'], DeprecationWarning)
self.assertEqual(
warningInfo[0]['message'],
"twisted.python.filepath.FilePath.statinfo was deprecated in "
"Twisted 15.0.0; please use other FilePath methods such as "
"getsize(), isdir(), getModificationTime(), etc. instead")
def test_deprecateStatinfoSetterSets(self):
"""
Setting L{twisted.python.filepath.FilePath.statinfo} changes the value
of _statinfo such that getting statinfo again returns the new value.
"""
fp = filepath.FilePath(self.mktemp())
fp.statinfo = None
self.assertIsNone(fp.statinfo)
def test_filePathNotDeprecated(self):
"""
While accessing L{twisted.python.filepath.FilePath.statinfo} is
deprecated, the filepath itself is not.
"""
filepath.FilePath(self.mktemp())
warningInfo = self.flushWarnings([self.test_filePathNotDeprecated])
self.assertEqual(warningInfo, [])
def test_getPermissions_Windows(self):
"""
Getting permissions for a file returns a L{Permissions} object in
Windows. Windows requires a different test, because user permissions
= group permissions = other permissions. Also, chmod may not be able
to set the execute bit, so we are skipping tests that set the execute
bit.
"""
# Change permission after test so file can be deleted
self.addCleanup(self.path.child(b"sub1").chmod, 0o777)
for mode in (0o777, 0o555):
self.path.child(b"sub1").chmod(mode)
self.assertEqual(self.path.child(b"sub1").getPermissions(),
filepath.Permissions(mode))
self.path.child(b"sub1").chmod(0o511) #sanity check to make sure that
# user=group=other permissions
self.assertEqual(self.path.child(b"sub1").getPermissions().shorthand(),
"r-xr-xr-x")
def test_whetherBlockOrSocket(self):
"""
        Ensure that a regular path is neither a block device nor a socket
"""
self.assertFalse(self.path.isBlockDevice())
self.assertFalse(self.path.isSocket())
def test_statinfoBitsNotImplementedInWindows(self):
"""
Verify that certain file stats are not available on Windows
"""
self.assertRaises(NotImplementedError, self.path.getInodeNumber)
self.assertRaises(NotImplementedError, self.path.getDevice)
self.assertRaises(NotImplementedError, self.path.getNumberOfHardLinks)
self.assertRaises(NotImplementedError, self.path.getUserID)
self.assertRaises(NotImplementedError, self.path.getGroupID)
def test_statinfoBitsAreNumbers(self):
"""
Verify that file inode/device/nlinks/uid/gid stats are numbers in
a POSIX environment
"""
if _PY3:
numbers = int
else:
numbers = (int, long)
c = self.path.child(b'file1')
for p in self.path, c:
self.assertIsInstance(p.getInodeNumber(), numbers)
self.assertIsInstance(p.getDevice(), numbers)
self.assertIsInstance(p.getNumberOfHardLinks(), numbers)
self.assertIsInstance(p.getUserID(), numbers)
self.assertIsInstance(p.getGroupID(), numbers)
self.assertEqual(self.path.getUserID(), c.getUserID())
self.assertEqual(self.path.getGroupID(), c.getGroupID())
def test_statinfoNumbersAreValid(self):
"""
Verify that the right numbers come back from the right accessor methods
for file inode/device/nlinks/uid/gid (in a POSIX environment)
"""
# specify fake statinfo information
class FakeStat:
st_ino = 200
st_dev = 300
st_nlink = 400
st_uid = 500
st_gid = 600
# monkey patch in a fake restat method for self.path
fake = FakeStat()
def fakeRestat(*args, **kwargs):
self.path._statinfo = fake
self.path.restat = fakeRestat
# ensure that restat will need to be called to get values
self.path._statinfo = None
self.assertEqual(self.path.getInodeNumber(), fake.st_ino)
self.assertEqual(self.path.getDevice(), fake.st_dev)
self.assertEqual(self.path.getNumberOfHardLinks(), fake.st_nlink)
self.assertEqual(self.path.getUserID(), fake.st_uid)
self.assertEqual(self.path.getGroupID(), fake.st_gid)
if platform.isWindows():
test_statinfoBitsAreNumbers.skip = True
test_statinfoNumbersAreValid.skip = True
test_getPermissions_POSIX.skip = True
else:
test_statinfoBitsNotImplementedInWindows.skip = "Test will run only on Windows."
test_getPermissions_Windows.skip = "Test will run only on Windows."
class SetContentTests(BytesTestCase):
"""
Tests for L{FilePath.setContent}.
"""
def test_write(self):
"""
Contents of the file referred to by a L{FilePath} can be written using
L{FilePath.setContent}.
"""
pathString = self.mktemp()
path = filepath.FilePath(pathString)
path.setContent(b"hello, world")
with open(pathString, "rb") as fObj:
contents = fObj.read()
self.assertEqual(b"hello, world", contents)
def test_fileClosing(self):
"""
If writing to the underlying file raises an exception,
L{FilePath.setContent} raises that exception after closing the file.
"""
fp = ExplodingFilePath(b"")
self.assertRaises(IOError, fp.setContent, b"blah")
self.assertTrue(fp.fp.closed)
def test_nameCollision(self):
"""
L{FilePath.setContent} will use a different temporary filename on each
invocation, so that multiple processes, threads, or reentrant
invocations will not collide with each other.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent(b"alpha")
fp.setContent(b"beta")
# Sanity check: setContent should only open one derivative path each
# time to store the temporary file.
openedSiblings = fp.openedPaths()
self.assertEqual(len(openedSiblings), 2)
self.assertNotEqual(openedSiblings[0], openedSiblings[1])
def _assertOneOpened(self, fp, extension):
"""
Assert that the L{TrackingFilePath} C{fp} was used to open one sibling
with the given extension.
@param fp: A L{TrackingFilePath} which should have been used to open
file at a sibling path.
@type fp: L{TrackingFilePath}
@param extension: The extension the sibling path is expected to have
had.
@type extension: L{bytes}
@raise: C{self.failureException} is raised if the extension of the
opened file is incorrect or if not exactly one file was opened
using C{fp}.
"""
opened = fp.openedPaths()
self.assertEqual(len(opened), 1, "expected exactly one opened file")
self.assertTrue(
opened[0].basename().endswith(extension),
"%s does not end with %r extension" % (
opened[0].basename(), extension))
def test_defaultExtension(self):
"""
L{FilePath.setContent} creates temporary files with the extension
I{.new} if no alternate extension value is given.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent(b"hello")
self._assertOneOpened(fp, b".new")
def test_customExtension(self):
"""
L{FilePath.setContent} creates temporary files with a user-supplied
extension so that if it is somehow interrupted while writing them the
file that it leaves behind will be identifiable.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent(b"goodbye", b"-something-else")
self._assertOneOpened(fp, b"-something-else")
class UnicodeFilePathTests(TestCase):
"""
L{FilePath} instances should have the same internal representation as they
were instantiated with.
"""
def test_UnicodeInstantiation(self):
"""
L{FilePath} instantiated with a text path will return a text-mode
FilePath.
"""
fp = filepath.FilePath(u'./mon\u20acy')
self.assertEqual(type(fp.path), unicode)
def test_UnicodeInstantiationBytesChild(self):
"""
Calling L{FilePath.child} on a text-mode L{FilePath} with a L{bytes}
subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.child(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_UnicodeInstantiationUnicodeChild(self):
"""
Calling L{FilePath.child} on a text-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.child(u'mon\u20acy')
self.assertEqual(type(child.path), unicode)
def test_UnicodeInstantiationUnicodePreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a text-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.preauthChild(u'mon\u20acy')
self.assertEqual(type(child.path), unicode)
def test_UnicodeInstantiationBytesPreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a text-mode L{FilePath} with a bytes
subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.preauthChild(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_BytesInstantiation(self):
"""
L{FilePath} instantiated with a L{bytes} path will return a bytes-mode
FilePath.
"""
fp = filepath.FilePath(b"./")
self.assertEqual(type(fp.path), bytes)
def test_BytesInstantiationBytesChild(self):
"""
Calling L{FilePath.child} on a bytes-mode L{FilePath} with a bytes
subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(b"./")
child = fp.child(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_BytesInstantiationUnicodeChild(self):
"""
Calling L{FilePath.child} on a bytes-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'parent-mon\u20acy'.encode('utf-8'))
child = fp.child(u"mon\u20acy")
self.assertEqual(type(child.path), unicode)
def test_BytesInstantiationBytesPreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a bytes-mode L{FilePath} with a
bytes subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy'.encode('utf-8'))
child = fp.preauthChild(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_BytesInstantiationUnicodePreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a bytes-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy'.encode('utf-8'))
child = fp.preauthChild(u"mon\u20acy")
self.assertEqual(type(child.path), unicode)
def test_unicoderepr(self):
"""
The repr of a L{unicode} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(u"/mon\u20acy")
reprOutput = repr(fp)
if _PY3:
self.assertEqual("FilePath('/mon\u20acy')", reprOutput)
else:
self.assertEqual("FilePath(u'/mon\\u20acy')", reprOutput)
def test_bytesrepr(self):
"""
The repr of a L{bytes} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(u'/parent-mon\u20acy'.encode('utf-8'))
reprOutput = repr(fp)
if _PY3:
self.assertEqual(
"FilePath(b'/parent-mon\\xe2\\x82\\xacy')", reprOutput)
else:
self.assertEqual(
"FilePath('/parent-mon\\xe2\\x82\\xacy')", reprOutput)
def test_unicodereprWindows(self):
"""
The repr of a L{unicode} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(u"C:\\")
reprOutput = repr(fp)
if _PY3:
self.assertEqual("FilePath('C:\\\\')", reprOutput)
else:
self.assertEqual("FilePath(u'C:\\\\')", reprOutput)
def test_bytesreprWindows(self):
"""
The repr of a L{bytes} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(b"C:\\")
reprOutput = repr(fp)
if _PY3:
self.assertEqual("FilePath(b'C:\\\\')", reprOutput)
else:
self.assertEqual("FilePath('C:\\\\')", reprOutput)
if platform.isWindows():
test_unicoderepr.skip = "Test will not work on Windows"
test_bytesrepr.skip = "Test will not work on Windows"
else:
test_unicodereprWindows.skip = "Test only works on Windows"
test_bytesreprWindows.skip = "Test only works on Windows"
def test_mixedTypeGlobChildren(self):
"""
C{globChildren} will return the same type as the pattern argument.
"""
fp = filepath.FilePath(u"/")
children = fp.globChildren(b"*")
self.assertIsInstance(children[0].path, bytes)
def test_unicodeGlobChildren(self):
"""
C{globChildren} works with L{unicode}.
"""
fp = filepath.FilePath(u"/")
children = fp.globChildren(u"*")
self.assertIsInstance(children[0].path, unicode)
def test_unicodeBasename(self):
"""
        Calling C{basename} on a text-mode L{FilePath} returns L{unicode}.
"""
fp = filepath.FilePath(u"./")
self.assertIsInstance(fp.basename(), unicode)
def test_unicodeDirname(self):
"""
Calling C{dirname} on a text-mode L{FilePath} returns L{unicode}.
"""
fp = filepath.FilePath(u"./")
self.assertIsInstance(fp.dirname(), unicode)
def test_unicodeParent(self):
"""
Calling C{parent} on a text-mode L{FilePath} will return a text-mode
L{FilePath}.
"""
fp = filepath.FilePath(u"./")
parent = fp.parent()
self.assertIsInstance(parent.path, unicode)
def test_mixedTypeTemporarySibling(self):
"""
A L{bytes} extension to C{temporarySibling} will mean a L{bytes} mode
L{FilePath} is returned.
"""
fp = filepath.FilePath(u"./mon\u20acy")
tempSibling = fp.temporarySibling(b".txt")
self.assertIsInstance(tempSibling.path, bytes)
def test_unicodeTemporarySibling(self):
"""
A L{unicode} extension to C{temporarySibling} will mean a L{unicode}
mode L{FilePath} is returned.
"""
fp = filepath.FilePath(u"/tmp/mon\u20acy")
tempSibling = fp.temporarySibling(u".txt")
self.assertIsInstance(tempSibling.path, unicode)
def test_mixedTypeSiblingExtensionSearch(self):
"""
C{siblingExtensionSearch} called with L{bytes} on a L{unicode}-mode
        L{FilePath} will return a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtensionSearch(b".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, bytes)
def test_unicodeSiblingExtensionSearch(self):
"""
C{siblingExtensionSearch} called with L{unicode} on a L{unicode}-mode
        L{FilePath} will return a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtensionSearch(u".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, unicode)
def test_mixedTypeSiblingExtension(self):
"""
C{siblingExtension} called with L{bytes} on a L{unicode}-mode
L{FilePath} will return a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtension(b".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, bytes)
def test_unicodeSiblingExtension(self):
"""
C{siblingExtension} called with L{unicode} on a L{unicode}-mode
L{FilePath} will return a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtension(u".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, unicode)
def test_mixedTypeChildSearchPreauth(self):
"""
C{childSearchPreauth} called with L{bytes} on a L{unicode}-mode
L{FilePath} will return a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
fp.createDirectory()
self.addCleanup(lambda: fp.remove())
child = fp.child("text.txt")
child.touch()
newPath = fp.childSearchPreauth(b"text.txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, bytes)
def test_unicodeChildSearchPreauth(self):
"""
C{childSearchPreauth} called with L{unicode} on a L{unicode}-mode
L{FilePath} will return a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
fp.createDirectory()
self.addCleanup(lambda: fp.remove())
child = fp.child("text.txt")
child.touch()
newPath = fp.childSearchPreauth(u"text.txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, unicode)
def test_asBytesModeFromUnicode(self):
"""
C{asBytesMode} on a L{unicode}-mode L{FilePath} returns a new
L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./tmp")
newfp = fp.asBytesMode()
self.assertIsNot(fp, newfp)
self.assertIsInstance(newfp.path, bytes)
def test_asTextModeFromBytes(self):
"""
        C{asTextMode} on a L{bytes}-mode L{FilePath} returns a new
        L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(b"./tmp")
newfp = fp.asTextMode()
self.assertIsNot(fp, newfp)
self.assertIsInstance(newfp.path, unicode)
def test_asBytesModeFromBytes(self):
"""
C{asBytesMode} on a L{bytes}-mode L{FilePath} returns the same
L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(b"./tmp")
newfp = fp.asBytesMode()
self.assertIs(fp, newfp)
self.assertIsInstance(newfp.path, bytes)
def test_asTextModeFromUnicode(self):
"""
C{asTextMode} on a L{unicode}-mode L{FilePath} returns the same
L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./tmp")
newfp = fp.asTextMode()
self.assertIs(fp, newfp)
self.assertIsInstance(newfp.path, unicode)
def test_asBytesModeFromUnicodeWithEncoding(self):
"""
C{asBytesMode} with an C{encoding} argument uses that encoding when
coercing the L{unicode}-mode L{FilePath} to a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"\u2603")
newfp = fp.asBytesMode(encoding="utf-8")
self.assertIn(b"\xe2\x98\x83", newfp.path)
def test_asTextModeFromBytesWithEncoding(self):
"""
C{asTextMode} with an C{encoding} argument uses that encoding when
coercing the L{bytes}-mode L{FilePath} to a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(b'\xe2\x98\x83')
newfp = fp.asTextMode(encoding="utf-8")
self.assertIn(u"\u2603", newfp.path)
def test_asBytesModeFromUnicodeWithUnusableEncoding(self):
"""
C{asBytesMode} with an C{encoding} argument that can't be used to encode
the unicode path raises a L{UnicodeError}.
"""
fp = filepath.FilePath(u"\u2603")
with self.assertRaises(UnicodeError):
fp.asBytesMode(encoding="ascii")
def test_asTextModeFromBytesWithUnusableEncoding(self):
"""
        C{asTextMode} with an C{encoding} argument that can't be used to
        decode the bytes path raises a L{UnicodeError}.
"""
fp = filepath.FilePath(b"\u2603")
with self.assertRaises(UnicodeError):
fp.asTextMode(encoding="utf-32")
| mit | 5,482,609,063,870,650,000 | 34.133144 | 90 | 0.600051 | false |
blockstack/packaging | imported/future/src/libpasteurize/fixes/fix_add_all_future_builtins.py | 60 | 1270 | """
For the ``future`` package.
Adds this import line::
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, list, map, next, object, oct, open, pow,
range, round, str, super, zip)
to a module, irrespective of whether each definition is used.
Adds these imports after any other imports (in an initial block of them).
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from libfuturize.fixer_util import touch_import_top
class FixAddAllFutureBuiltins(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "file_input"
run_order = 1
def transform(self, node, results):
# import_str = """(ascii, bytes, chr, dict, filter, hex, input,
# int, list, map, next, object, oct, open, pow,
# range, round, str, super, zip)"""
touch_import_top(u'builtins', '*', node)
# builtins = """ascii bytes chr dict filter hex input
# int list map next object oct open pow
# range round str super zip"""
# for builtin in sorted(builtins.split(), reverse=True):
# touch_import_top(u'builtins', builtin, node)
| gpl-3.0 | -7,345,221,367,281,167,000 | 32.421053 | 76 | 0.584252 | false |
manpen/thrill | frontends/swig_python/python_test.py | 4 | 4291 | #!/usr/bin/env python
##########################################################################
# frontends/swig_python/python_test.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import unittest
import threading
import sys
import thrill
class TryThread(threading.Thread):
def __init__(self, **kwargs):
threading.Thread.__init__(self, **kwargs)
self.exception = None
def run(self):
try:
threading.Thread.run(self)
except Exception:
self.exception = sys.exc_info()
raise
def run_thrill_threads(num_threads, thread_func):
# construct a local context mock network
ctxs = thrill.PyContext.ConstructLoopback(num_threads, 1)
# but then start python threads for each context
threads = []
for thrid in range(0, num_threads):
t = TryThread(target=thread_func, args=(ctxs[thrid],))
t.start()
threads.append(t)
# wait for computation to finish
for thr in threads:
thr.join()
# check for exceptions
for thr in threads:
if thr.exception:
raise Exception(thr.exception)
def run_tests(thread_func):
for num_threads in [1, 2, 5]:
run_thrill_threads(num_threads, thread_func)
class TestOperations(unittest.TestCase):
def test_generate_allgather(self):
def test(ctx):
test_size = 1024
dia1 = ctx.Generate(
lambda x: [int(x), "hello %d" % (x)], test_size)
self.assertEqual(dia1.Size(), test_size)
check = [[int(x), "hello %d" % (x)] for x in range(0, test_size)]
self.assertEqual(dia1.AllGather(), check)
run_tests(test)
def test_generate_map_allgather(self):
def test(ctx):
test_size = 1024
dia1 = ctx.Generate(lambda x: int(x), test_size)
self.assertEqual(dia1.Size(), test_size)
dia2 = dia1.Map(lambda x: [int(x), "hello %d" % (x)])
check = [[int(x), "hello %d" % (x)] for x in range(0, test_size)]
self.assertEqual(dia2.Size(), test_size)
self.assertEqual(dia2.AllGather(), check)
dia3 = dia1.Map(lambda x: [int(x), "two %d" % (x)])
check = [[int(x), "two %d" % (x)] for x in range(0, test_size)]
self.assertEqual(dia3.Size(), test_size)
self.assertEqual(dia3.AllGather(), check)
run_tests(test)
def test_distribute_map_filter_allgather(self):
def test(ctx):
test_size = 1024
dia1 = ctx.Distribute([x * x for x in range(0, test_size)])
self.assertEqual(dia1.Size(), test_size)
dia2 = dia1.Map(lambda x: [int(x), "hello %d" % (x)])
dia3 = dia2.Filter(lambda x: x[0] >= 16 and x[0] < 10000)
check = [[int(x * x), "hello %d" % (x * x)]
for x in range(4, 100)]
self.assertEqual(dia3.AllGather(), check)
run_tests(test)
def my_generator(self, index):
#print("generator at index", index)
return (index, "hello at %d" % (index))
def my_thread(self, ctx):
print("thread in python, rank", ctx.my_rank())
dia1 = ctx.Generate(lambda x: [int(x), x], 50)
dia2 = dia1.Map(lambda x: (x[0], x[1] + " mapped"))
s = dia2.Size()
print("Size:", s)
self.assertEqual(s, 50)
print("AllGather:", dia2.AllGather())
dia3 = dia2.ReduceBy(lambda x: x[0] % 10,
lambda x, y: (x + y))
print("dia3.Size:", dia3.Size())
print("dia3.AllGather:", dia3.AllGather())
dia4 = dia3.Filter(lambda x: x[0] == 2)
print("dia4.AllGather:", dia4.AllGather())
#####
dia5 = ctx.Distribute([2, 3, 5, 7, 11, 13, 17, 19])
print("dia5.AllGather:", dia5.AllGather())
def notest_operations(self):
run_thrill_threads(4, self.my_thread)
if __name__ == '__main__':
unittest.main()
##########################################################################
| bsd-2-clause | -3,734,699,750,667,732,500 | 27.417219 | 77 | 0.528548 | false |
lkeijser/func | test/unittest/test_func_arg.py | 8 | 4973 | ##
## Copyright 2007, Red Hat, Inc
## see AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
#tester module for ArgCompatibility
from func.minion.func_arg import ArgCompatibility
class TestArgCompatibility:
def setUp(self):
#create the simple object
self.ac = ArgCompatibility(self.dummy_arg_getter())
def test_arg_compatibility(self):
"""
        Testing the method argument compatibility
"""
result = self.ac.validate_all()
assert result == True
self.ac = ArgCompatibility(self.dummy_no_getter())
result = self.ac.validate_all()
assert result == True
self.ac = ArgCompatibility(self.dummy_empty_args())
result = self.ac.validate_all()
assert result == True
def test_is_all_arguments_registered(self):
#create the dummy class
tc = FooClass()
arguments = tc.register_method()
assert self.ac.is_all_arguments_registered(tc,'foomethod',arguments['foomethod']['args'])==True
print arguments
assert self.ac.validate_all()==True
def dummy_no_getter(self):
return {}
def dummy_empty_args(self):
return{
'myfunc':{
'args':{},
'description':'Cool methods here'
}
}
def dummy_arg_getter(self):
"""
        A simple method to test the argument-compatibility checking we have
        written. It just returns a dict with well-formed argument metadata.
        More tests should be added here to make sure nothing is missed.
"""
return {
'hifunc':{
'args':{
'app':{
'type':'int',
'range':[0,100],
'optional':False,
'default' : 12
},
'platform':{
'type':'string',
'options':["fedora","redhat","ubuntu"],
'description':"Hey im a fedora fan",
'default':'fedora8',
},
'platform2':{
'type':'string',
'min_length':4,
'max_length':33,
'description':"Hey im a fedora fan",
'default':'fedora8',
},
'is_independent':{
'type':'boolean',
'default' :False,
'description':'Are you independent ?',
'optional':False
},
'some_string':{
'type':'string',
'validator': "^[a-zA-Z]$",
'description':'String to be validated',
'default':'makkalot',
                    'optional':False}, # validator is a regex string for those who need better validation; when 'options' is given there is no need for a validator, and vice versa
                # also define a float -- we don't strictly need it, but it may be useful for the UI stuff.
'some_float':{
'type':'float',
'description':'The float point value',
'default':33.44,
'optional':False
},
'some_iterable':{
'type':'list',
'description':'The value and description for *arg',
'optional':True, #that is where it makes sense
'validator':'^[0-9]+$',#maybe useful to say it is to be a number for example
},
'some_hash':{
'type':'hash',
'description':"Dummy desc here",
'optional':True, #of course it is,
'validator':"^[a-z]*$",#only for values not keys
}
},
'description':"The dummy method description",
}
}
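# Editor's sketch (illustrative only): a minimal registration dict in the same
# shape as above, validated the way the tests do it.
#
#     spec = {'ping': {'args': {'count': {'type': 'int', 'range': [0, 10],
#                                         'optional': True, 'default': 3}},
#                      'description': 'number of echo requests'}}
#     assert ArgCompatibility(spec).validate_all() == True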
class FooClass(object):
"""
Sample class for testing the is_all_arguments_registered
method functionality ...
"""
def foomethod(self,arg1,arg5,arg4,*arg,**kw):
pass
def register_method(self):
return{
'foomethod':{
'args':{
'arg1':{},
'arg4':{},
'arg5':{},
'arg':{},
'kw':{},
}
}
}
| gpl-2.0 | -5,862,978,857,972,503,000 | 31.933775 | 181 | 0.448421 | false |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/command/bdist_dumb.py | 53 | 4901 | """distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
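# Editor's note (illustrative): the command is normally driven from a
# project's setup script, e.g.
#
#     python setup.py bdist_dumb --format=gztar --dist-dir=dist
#
# with the available formats (tar, ztar, gztar, zip) listed in the
# ``user_options`` table below.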
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_dumb.py 61000 2008-02-23 17:40:11Z christian.heimes $"
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb (Command):
description = "create a \"dumb\" built distribution"
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.relative = 0
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'))
# finalize_options()
def run (self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
# class bdist_dumb
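# Example invocation (illustrative, not part of the original module): from a
# project with a setup.py, build a "dumb" gzipped-tarball distribution using
# relative paths:
#
#   python setup.py bdist_dumb --format=gztar --relative
#
# The resulting archive is written to dist/ and is meant to be unpacked
# directly under the target prefix.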
| apache-2.0 | -3,497,645,944,928,719,400 | 35.303704 | 81 | 0.537033 | false |
shujaatak/UAV_MissionPlanner | Lib/site-packages/numpy/lib/tests/test__datasource.py | 54 | 10225 | import os
import sys
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
from urlparse import urlparse
from urllib2 import URLError
import urllib2
from numpy.testing import *
from numpy.compat import asbytes
import numpy.lib._datasource as datasource
def urlopen_stub(url, data=None):
'''Stub to replace urlopen for testing.'''
if url == valid_httpurl():
tmpfile = NamedTemporaryFile(prefix='urltmp_')
return tmpfile
else:
raise URLError('Name or service not known')
old_urlopen = None
def setup():
global old_urlopen
old_urlopen = urllib2.urlopen
urllib2.urlopen = urlopen_stub
def teardown():
urllib2.urlopen = old_urlopen
# A valid website for more robust testing
http_path = 'http://www.google.com/'
http_file = 'index.html'
http_fakepath = 'http://fake.abc.web/site/'
http_fakefile = 'fake.txt'
malicious_files = ['/etc/shadow', '../../shadow',
'..\\system.dat', 'c:\\windows\\system.dat']
magic_line = asbytes('three is the magic number')
# Utility functions used by many TestCases
def valid_textfile(filedir):
# Generate and return a valid temporary file.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
os.close(fd)
return path
def invalid_textfile(filedir):
# Generate and return an invalid filename.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
os.close(fd)
os.remove(path)
return path
def valid_httpurl():
return http_path+http_file
def invalid_httpurl():
return http_fakepath+http_fakefile
def valid_baseurl():
return http_path
def invalid_baseurl():
return http_fakepath
def valid_httpfile():
return http_file
def invalid_httpfile():
return http_fakefile
class TestDataSourceOpen(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
assert self.ds.open(valid_httpurl())
def test_InvalidHTTP(self):
url = invalid_httpurl()
self.assertRaises(IOError, self.ds.open, url)
try:
self.ds.open(url)
except IOError, e:
# Regression test for bug fixed in r4342.
assert e.errno is None
def test_InvalidHTTPCacheURLError(self):
self.assertRaises(URLError, self.ds._cache, invalid_httpurl())
def test_ValidFile(self):
local_file = valid_textfile(self.tmpdir)
assert self.ds.open(local_file)
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
self.assertRaises(IOError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
import nose
raise nose.SkipTest
# Test datasource's internal file_opener for Gzip files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
self.assertEqual(magic_line, result)
def test_ValidBz2File(self):
try:
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
import nose
raise nose.SkipTest
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
self.assertEqual(magic_line, result)
class TestDataSourceExists(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
assert self.ds.exists(valid_httpurl())
def test_InvalidHTTP(self):
self.assertEqual(self.ds.exists(invalid_httpurl()), False)
def test_ValidFile(self):
# Test valid file in destpath
tmpfile = valid_textfile(self.tmpdir)
assert self.ds.exists(tmpfile)
# Test valid local file not in destpath
localdir = mkdtemp()
tmpfile = valid_textfile(localdir)
assert self.ds.exists(tmpfile)
rmtree(localdir)
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
self.assertEqual(self.ds.exists(tmpfile), False)
class TestDataSourceAbspath(TestCase):
def setUp(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
self.assertEqual(local_path, self.ds.abspath(valid_httpurl()))
def test_ValidFile(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
self.assertEqual(tmpfile, self.ds.abspath(os.path.split(tmpfile)[-1]))
# Test filename with complete path
self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
def test_InvalidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
invalidhttp = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl()))
def test_InvalidFile(self):
invalidfile = valid_textfile(self.tmpdir)
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile))
def test_sandboxing(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
assert tmp_path(valid_httpurl()).startswith(self.tmpdir)
assert tmp_path(invalid_httpurl()).startswith(self.tmpdir)
assert tmp_path(tmpfile).startswith(self.tmpdir)
assert tmp_path(tmpfilename).startswith(self.tmpdir)
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
os.sep = '\\'
self.test_ValidHTTP()
self.test_ValidFile()
self.test_InvalidHTTP()
self.test_InvalidFile()
self.test_sandboxing()
finally:
os.sep = orig_os_sep
class TestRepositoryAbspath(TestCase):
def setUp(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.repos
def test_ValidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.repos._destpath, netloc, \
upath.strip(os.sep).strip('/'))
filepath = self.repos.abspath(valid_httpfile())
self.assertEqual(local_path, filepath)
def test_sandboxing(self):
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
assert tmp_path(valid_httpfile()).startswith(self.tmpdir)
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
os.sep = '\\'
self.test_ValidHTTP()
self.test_sandboxing()
finally:
os.sep = orig_os_sep
class TestRepositoryExists(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.repos
def test_ValidFile(self):
# Create local temp file
tmpfile = valid_textfile(self.tmpdir)
assert self.repos.exists(tmpfile)
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
self.assertEqual(self.repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self):
assert self.repos.exists(valid_httpurl())
def test_CachedHTTPFile(self):
localfile = valid_httpurl()
# Create a locally cached temp file with an URL based
# directory structure. This is similar to what Repository.open
# would do.
scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
local_path = os.path.join(self.repos._destpath, netloc)
os.mkdir(local_path, 0700)
tmpfile = valid_textfile(local_path)
assert self.repos.exists(tmpfile)
class TestOpenFunc(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
rmtree(self.tmpdir)
def test_DataSourceOpen(self):
local_file = valid_textfile(self.tmpdir)
# Test case where destpath is passed in
assert datasource.open(local_file, destpath=self.tmpdir)
# Test case where default destpath is used
assert datasource.open(local_file)
if hasattr(sys, 'gettotalrefcount'):
# skip these, when Python was compiled using the --with-pydebug option
del TestDataSourceOpen
del TestDataSourceExists
del TestDataSourceAbspath
del TestRepositoryExists
del TestOpenFunc
if __name__ == "__main__":
run_module_suite()
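# The DataSource behaviour exercised above, in short (illustrative only):
#
#   ds = datasource.DataSource('/tmp/cache')          # destpath for cached copies
#   fh = ds.open('http://www.google.com/index.html')  # remote files are cached locally
#   path = ds.abspath('http://www.google.com/index.html')  # location inside the cache
#
# Gzip and BZip2 files are transparently decompressed by DataSource.open.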
| gpl-2.0 | 7,366,578,464,571,990,000 | 30.079027 | 78 | 0.633252 | false |
omarocegueda/dipy | dipy/segment/tissue.py | 6 | 6207 | import numpy as np
from dipy.sims.voxel import add_noise
from dipy.segment.mrf import (ConstantObservationModel,
IteratedConditionalModes)
class TissueClassifierHMRF(object):
r"""
This class contains the methods for tissue classification using the Markov
Random Fields modeling approach
"""
def __init__(self, save_history=False, verbose=True):
self.save_history = save_history
self.segmentations = []
self.pves = []
self.energies = []
self.energies_sum = []
self.verbose = verbose
def classify(self, image, nclasses, beta, tolerance=None, max_iter=None):
r"""
This method uses the Maximum a posteriori - Markov Random Field
approach for segmentation by using the Iterative Conditional Modes and
Expectation Maximization to estimate the parameters.
Parameters
----------
image : ndarray,
3D structural image.
nclasses : int,
number of desired classes.
beta : float,
smoothing parameter, the higher this number the smoother the
output will be.
tolerance: float,
value that defines the percentage of change tolerated to
prevent the ICM loop to stop. Default is 1e-05.
max_iter : float,
fixed number of desired iterations. Default is 100.
            If the user only specifies this parameter, the tolerance
            value will not be considered. If neither parameter is
            given, tolerance defaults to 1e-05 and max_iter to 100.
Returns
-------
initial_segmentation : ndarray,
3D segmented image with all tissue types
specified in nclasses.
final_segmentation : ndarray,
3D final refined segmentation containing all
tissue types.
PVE : ndarray,
3D probability map of each tissue type.
"""
nclasses = nclasses + 1 # One extra class for the background
energy_sum = [1e-05]
com = ConstantObservationModel()
icm = IteratedConditionalModes()
if image.max() > 1:
image = np.interp(image, [0, image.max()], [0.0, 1.0])
mu, sigma = com.initialize_param_uniform(image, nclasses)
p = np.argsort(mu)
mu = mu[p]
sigma = sigma[p]
sigmasq = sigma ** 2
neglogl = com.negloglikelihood(image, mu, sigmasq, nclasses)
seg_init = icm.initialize_maximum_likelihood(neglogl)
mu, sigma = com.seg_stats(image, seg_init, nclasses)
sigmasq = sigma ** 2
zero = np.zeros_like(image) + 0.001
zero_noise = add_noise(zero, 10000, 1, noise_type='gaussian')
image_gauss = np.where(image == 0, zero_noise, image)
final_segmentation = np.empty_like(image)
initial_segmentation = seg_init.copy()
if max_iter is not None and tolerance is None:
for i in range(max_iter):
if self.verbose:
print('>> Iteration: ' + str(i))
PLN = icm.prob_neighborhood(seg_init, beta, nclasses)
PVE = com.prob_image(image_gauss, nclasses, mu, sigmasq, PLN)
mu_upd, sigmasq_upd = com.update_param(image_gauss,
PVE, mu, nclasses)
ind = np.argsort(mu_upd)
mu_upd = mu_upd[ind]
sigmasq_upd = sigmasq_upd[ind]
negll = com.negloglikelihood(image_gauss,
mu_upd, sigmasq_upd, nclasses)
final_segmentation, energy = icm.icm_ising(negll,
beta, seg_init)
if self.save_history:
self.segmentations.append(final_segmentation)
self.pves.append(PVE)
self.energies.append(energy)
self.energies_sum.append(energy[energy > -np.inf].sum())
seg_init = final_segmentation.copy()
mu = mu_upd.copy()
sigmasq = sigmasq_upd.copy()
else:
max_iter = 100
for i in range(max_iter):
if self.verbose:
print('>> Iteration: ' + str(i))
PLN = icm.prob_neighborhood(seg_init, beta, nclasses)
PVE = com.prob_image(image_gauss, nclasses, mu, sigmasq, PLN)
mu_upd, sigmasq_upd = com.update_param(image_gauss,
PVE, mu, nclasses)
ind = np.argsort(mu_upd)
mu_upd = mu_upd[ind]
sigmasq_upd = sigmasq_upd[ind]
negll = com.negloglikelihood(image_gauss,
mu_upd, sigmasq_upd, nclasses)
final_segmentation, energy = icm.icm_ising(negll,
beta, seg_init)
energy_sum.append(energy[energy > -np.inf].sum())
if self.save_history:
self.segmentations.append(final_segmentation)
self.pves.append(PVE)
self.energies.append(energy)
self.energies_sum.append(energy[energy > -np.inf].sum())
if tolerance is None:
tolerance = 1e-05
if i % 10 == 0 and i != 0:
tol = tolerance * (np.amax(energy_sum) -
np.amin(energy_sum))
test_dist = np.absolute(np.amax(
energy_sum[np.size(energy_sum) - 5: i]) -
np.amin(energy_sum[np.size(energy_sum) - 5: i])
)
if test_dist < tol:
break
seg_init = final_segmentation.copy()
mu = mu_upd.copy()
sigmasq = sigmasq_upd.copy()
PVE = PVE[..., 1:]
return initial_segmentation, final_segmentation, PVE
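# Illustrative usage (variable names are hypothetical, not from this module):
#
#   hmrf = TissueClassifierHMRF()
#   initial, final, pve = hmrf.classify(t1_volume, nclasses=3, beta=0.1,
#                                       max_iter=100)
#
# ``final`` holds the refined segmentation and ``pve`` one probability map per
# requested tissue class (the extra background class is stripped before return).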
| bsd-3-clause | 1,052,245,938,812,822,500 | 35.727811 | 79 | 0.507653 | false |
davidharrigan/django | tests/csrf_tests/tests.py | 78 | 23643 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token,
)
from django.template import RequestContext, Template
from django.template.context_processors import csrf
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import (
csrf_exempt, ensure_csrf_cookie, requires_csrf_token,
)
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content="""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(SimpleTestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = b"<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
If the token is longer than expected, it is ignored and a new token is
created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['httponly'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
        Check that if both a cookie and a token are present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = b""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
@override_settings(DEBUG=True)
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req.META['SERVER_PORT'] = '443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - https://www.evil.org/somepage does not '
'match any trusted origins.',
status_code=403,
)
@override_settings(DEBUG=True)
def test_https_malformed_referer(self):
"""
        A POST HTTPS request with a malformed referer is rejected.
"""
malformed_referer_msg = 'Referer checking failed - Referer is malformed.'
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://http://www.example.com/'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
# Empty
req.META['HTTP_REFERER'] = ''
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Non-ASCII
req.META['HTTP_REFERER'] = b'\xd8B\xf6I\xdf'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing scheme
# >>> urlparse('//example.com/')
# ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='')
req.META['HTTP_REFERER'] = '//example.com/'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing netloc
# >>> urlparse('https://')
# ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='')
req.META['HTTP_REFERER'] = 'https://'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
"""
A POST HTTPS request with a good referer is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
"""
A POST HTTPS request with a good referer is accepted where the referer
contains no trailing slash.
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['dashboard.example.com'])
def test_https_csrf_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['.example.com'])
def test_https_csrf_wildcard_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS
        wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'https://foo.example.com/'
req.META['SERVER_PORT'] = '443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://foo.example.com:4443/'
req.META['SERVER_PORT'] = '4443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(CSRF_COOKIE_DOMAIN='.example.com', DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://example.com/'
req.META['SERVER_PORT'] = '443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
def test_ensures_csrf_cookie_no_middleware(self):
"""
The ensure_csrf_cookie() decorator works without middleware.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
resp = view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp.get('Vary', ''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, view, (), {})
resp = view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_ensures_csrf_cookie_no_logging(self):
"""
ensure_csrf_cookie() doesn't log warnings (#19436).
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
class TestHandler(logging.Handler):
def emit(self, record):
raise Exception("This shouldn't have happened!")
logger = logging.getLogger('django.request')
test_handler = TestHandler()
old_log_level = logger.level
try:
logger.addHandler(test_handler)
logger.setLevel(logging.WARNING)
req = self._get_GET_no_csrf_cookie_request()
view(req)
finally:
logger.removeHandler(test_handler)
logger.setLevel(old_log_level)
def test_csrf_cookie_age(self):
"""
CSRF cookie age can be set using settings.CSRF_COOKIE_AGE.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = 123
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
max_age = resp2.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
CSRF cookie age does not have max age set and therefore uses
session-based cookies.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = None
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
max_age = resp2.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, '')
def test_post_data_read_failure(self):
"""
#20128 -- IOErrors during POST data reading should be caught and
treated as if the POST data wasn't there.
"""
class CsrfPostRequest(HttpRequest):
"""
HttpRequest that can raise an IOError when accessing POST data
"""
def __init__(self, token, raise_error):
super(CsrfPostRequest, self).__init__()
self.method = 'POST'
self.raise_error = False
self.COOKIES[settings.CSRF_COOKIE_NAME] = token
self.POST['csrfmiddlewaretoken'] = token
self.raise_error = raise_error
def _load_post_and_files(self):
raise IOError('error reading input data')
def _get_post(self):
if self.raise_error:
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
token = 'ABC'
req = CsrfPostRequest(token, raise_error=False)
resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = CsrfPostRequest(token, raise_error=True)
resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(resp.status_code, 403)
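# The pattern used throughout this suite, in short (illustrative):
#
#   req = <request carrying the CSRF cookie and a matching token>
#   retval = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
#   # retval is None when the request is accepted; a 403 HttpResponse is
#   # returned when CSRF verification fails.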
| bsd-3-clause | 2,972,456,851,825,409,500 | 39.346416 | 107 | 0.607833 | false |
wizyoung/workflows.kyoyue | PIL/ImageWin.py | 9 | 7167 | #
# The Python Imaging Library.
# $Id$
#
# a Windows DIB display interface
#
# History:
# 1996-05-20 fl Created
# 1996-09-20 fl Fixed subregion exposure
# 1997-09-21 fl Added draw primitive (for tzPrint)
# 2003-05-21 fl Added experimental Window/ImageWindow classes
# 2003-09-05 fl Added fromstring/tostring methods
#
# Copyright (c) Secret Labs AB 1997-2003.
# Copyright (c) Fredrik Lundh 1996-2003.
#
# See the README file for information on usage and redistribution.
#
from . import Image
class HDC(object):
"""
Wraps an HDC integer. The resulting object can be passed to the
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
methods.
"""
def __init__(self, dc):
self.dc = dc
def __int__(self):
return self.dc
class HWND(object):
"""
Wraps an HWND integer. The resulting object can be passed to the
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
methods, instead of a DC.
"""
def __init__(self, wnd):
self.wnd = wnd
def __int__(self):
return self.wnd
class Dib(object):
"""
A Windows bitmap with the given mode and size. The mode can be one of "1",
"L", "P", or "RGB".
If the display requires a palette, this constructor creates a suitable
palette and associates it with the image. For an "L" image, 128 greylevels
are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
with 20 greylevels.
To make sure that palettes work properly under Windows, you must call the
**palette** method upon certain events from Windows.
:param image: Either a PIL image, or a mode string. If a mode string is
used, a size must also be given. The mode can be one of "1",
"L", "P", or "RGB".
:param size: If the first argument is a mode string, this
defines the size of the image.
"""
def __init__(self, image, size=None):
if hasattr(image, "mode") and hasattr(image, "size"):
mode = image.mode
size = image.size
else:
mode = image
image = None
if mode not in ["1", "L", "P", "RGB"]:
mode = Image.getmodebase(mode)
self.image = Image.core.display(mode, size)
self.mode = mode
self.size = size
if image:
self.paste(image)
def expose(self, handle):
"""
Copy the bitmap contents to a device context.
:param handle: Device context (HDC), cast to a Python integer, or an
HDC or HWND instance. In PythonWin, you can use the
:py:meth:`CDC.GetHandleAttrib` to get a suitable handle.
"""
if isinstance(handle, HWND):
dc = self.image.getdc(handle)
try:
result = self.image.expose(dc)
finally:
self.image.releasedc(handle, dc)
else:
result = self.image.expose(handle)
return result
def draw(self, handle, dst, src=None):
"""
Same as expose, but allows you to specify where to draw the image, and
what part of it to draw.
The destination and source areas are given as 4-tuple rectangles. If
the source is omitted, the entire image is copied. If the source and
the destination have different sizes, the image is resized as
necessary.
"""
if not src:
src = (0, 0) + self.size
if isinstance(handle, HWND):
dc = self.image.getdc(handle)
try:
result = self.image.draw(dc, dst, src)
finally:
self.image.releasedc(handle, dc)
else:
result = self.image.draw(handle, dst, src)
return result
def query_palette(self, handle):
"""
Installs the palette associated with the image in the given device
context.
This method should be called upon **QUERYNEWPALETTE** and
**PALETTECHANGED** events from Windows. If this method returns a
non-zero value, one or more display palette entries were changed, and
the image should be redrawn.
:param handle: Device context (HDC), cast to a Python integer, or an
HDC or HWND instance.
:return: A true value if one or more entries were changed (this
indicates that the image should be redrawn).
"""
if isinstance(handle, HWND):
handle = self.image.getdc(handle)
try:
result = self.image.query_palette(handle)
finally:
self.image.releasedc(handle, handle)
else:
result = self.image.query_palette(handle)
return result
def paste(self, im, box=None):
"""
Paste a PIL image into the bitmap image.
:param im: A PIL image. The size must match the target region.
If the mode does not match, the image is converted to the
mode of the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and
lower pixel coordinate. If None is given instead of a
tuple, all of the image is assumed.
"""
im.load()
if self.mode != im.mode:
im = im.convert(self.mode)
if box:
self.image.paste(im.im, box)
else:
self.image.paste(im.im)
def frombytes(self, buffer):
"""
Load display memory contents from byte data.
:param buffer: A buffer containing display data (usually
data returned from <b>tobytes</b>)
"""
return self.image.frombytes(buffer)
def tobytes(self):
"""
Copy display memory contents to bytes object.
:return: A bytes object containing display data.
"""
return self.image.tobytes()
class Window(object):
"""Create a Window with the given title size."""
def __init__(self, title="PIL", width=None, height=None):
self.hwnd = Image.core.createwindow(
title, self.__dispatcher, width or 0, height or 0
)
def __dispatcher(self, action, *args):
return getattr(self, "ui_handle_" + action)(*args)
def ui_handle_clear(self, dc, x0, y0, x1, y1):
pass
def ui_handle_damage(self, x0, y0, x1, y1):
pass
def ui_handle_destroy(self):
pass
def ui_handle_repair(self, dc, x0, y0, x1, y1):
pass
def ui_handle_resize(self, width, height):
pass
def mainloop(self):
Image.core.eventloop()
class ImageWindow(Window):
"""Create an image window which displays the given image."""
def __init__(self, image, title="PIL"):
if not isinstance(image, Dib):
image = Dib(image)
self.image = image
width, height = image.size
Window.__init__(self, title, width=width, height=height)
def ui_handle_repair(self, dc, x0, y0, x1, y1):
self.image.draw(dc, (x0, y0, x1, y1))
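# Illustrative usage on Windows (``hdc`` is assumed to come from the Win32 API
# or PythonWin; it is not provided by this module):
#
#   from PIL import Image, ImageWin
#   dib = ImageWin.Dib(Image.open("test.png").convert("RGB"))
#   dib.expose(ImageWin.HDC(hdc))   # blit the bitmap onto a device context
#
#   # or display an image in a minimal window:
#   ImageWin.ImageWindow(Image.open("test.png")).mainloop()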
| mit | -5,730,389,802,643,529,000 | 30.572687 | 79 | 0.580578 | false |
detiber/openshift-ansible-contrib | reference-architecture/aws-ansible/add-cns-storage.py | 1 | 13272 | #!/usr/bin/env python
# vim: sw=2 ts=2
import click
import os
import sys
@click.command()
### Cluster options
@click.option('--console-port', default='443', type=click.IntRange(1,65535), help='OpenShift web console port',
show_default=True)
@click.option('--deployment-type', default='openshift-enterprise', help='OpenShift deployment type',
show_default=True)
@click.option('--openshift-sdn', default='openshift-ovs-subnet', type=click.Choice(['openshift-ovs-subnet', 'openshift-ovs-multitenant']), help='OpenShift SDN',
show_default=True)
### AWS/EC2 options
@click.option('--gluster-stack', help='Specify a gluster stack name. Making the name unique will allow for multiple deployments',
show_default=True)
@click.option('--region', default='us-east-1', help='ec2 region',
show_default=True)
@click.option('--ami', default='ami-10251c7a', help='ec2 ami',
show_default=True)
@click.option('--node-instance-type', default='m4.2xlarge', help='ec2 instance type',
show_default=True)
@click.option('--use-cloudformation-facts', is_flag=True, help='Use cloudformation to populate facts. Requires Deployment >= OCP 3.5',
show_default=True)
@click.option('--keypair', help='ec2 keypair name',
show_default=True)
@click.option('--private-subnet-id1', help='Specify a Private subnet within the existing VPC',
show_default=True)
@click.option('--private-subnet-id2', help='Specify a Private subnet within the existing VPC',
show_default=True)
@click.option('--private-subnet-id3', help='Specify a Private subnet within the existing VPC',
show_default=True)
@click.option('--gluster-volume-size', default='500', help='Gluster volume size in GB',
show_default=True)
@click.option('--gluster-volume-type', default='st1', help='Gluster volume type',
show_default=True)
@click.option('--iops', help='Specify the IOPS for a volume (used only with IO1)',
show_default=True)
### DNS options
@click.option('--public-hosted-zone', help='hosted zone for accessing the environment')
### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-password', help='Red Hat Subscription Management Password',
hide_input=True,)
@click.option('--rhsm-pool', help='Red Hat Subscription Management Pool Name')
### Miscellaneous options
@click.option('--containerized', default='False', help='Containerized installation of OpenShift',
show_default=True)
@click.option('--iam-role', help='Specify the name of the existing IAM Instance profile',
show_default=True)
@click.option('--node-sg', help='Specify the already existing node security group id',
show_default=True)
@click.option('--existing-stack', help='Specify the name of the existing CloudFormation stack')
@click.option('--no-confirm', is_flag=True,
help='Skip confirmation prompt')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)
def launch_refarch_env(region=None,
ami=None,
no_confirm=False,
node_instance_type=None,
gluster_stack=None,
keypair=None,
public_hosted_zone=None,
deployment_type=None,
console_port=443,
rhsm_user=None,
rhsm_password=None,
rhsm_pool=None,
containerized=None,
node_type=None,
private_subnet_id1=None,
private_subnet_id2=None,
private_subnet_id3=None,
gluster_volume_type=None,
gluster_volume_size=None,
openshift_sdn=None,
iops=None,
node_sg=None,
iam_role=None,
existing_stack=None,
use_cloudformation_facts=False,
verbose=0):
# Need to prompt for the R53 zone:
if public_hosted_zone is None:
public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')
if existing_stack is None:
existing_stack = click.prompt('Specify the name of the existing CloudFormation stack')
if gluster_stack is None:
gluster_stack = click.prompt('Specify a unique name for the CNS CloudFormation stack')
# If no keypair is specified fail:
if keypair is None:
keypair = click.prompt('A SSH keypair must be specified or created')
# If the user already provided values, don't bother asking again
if deployment_type in ['openshift-enterprise'] and rhsm_user is None:
rhsm_user = click.prompt("RHSM username?")
if deployment_type in ['openshift-enterprise'] and rhsm_password is None:
rhsm_password = click.prompt("RHSM password?", hide_input=True)
if deployment_type in ['openshift-enterprise'] and rhsm_pool is None:
rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name for OpenShift?")
# Prompt for vars if they are not defined
if use_cloudformation_facts and iam_role is None:
iam_role = "Computed by Cloudformations"
elif iam_role is None:
iam_role = click.prompt("Specify the IAM Role of the node?")
if use_cloudformation_facts and node_sg is None:
node_sg = "Computed by Cloudformations"
elif node_sg is None:
node_sg = click.prompt("Specify the Security Group for the nodes?")
if use_cloudformation_facts and private_subnet_id1 is None:
private_subnet_id1 = "Computed by Cloudformations"
elif private_subnet_id1 is None:
private_subnet_id1 = click.prompt("Specify the first private subnet for the nodes?")
if use_cloudformation_facts and private_subnet_id2 is None:
private_subnet_id2 = "Computed by Cloudformations"
elif private_subnet_id2 is None:
private_subnet_id2 = click.prompt("Specify the second private subnet for the nodes?")
if use_cloudformation_facts and private_subnet_id3 is None:
private_subnet_id3 = "Computed by Cloudformations"
elif private_subnet_id3 is None:
private_subnet_id3 = click.prompt("Specify the third private subnet for the nodes?")
if gluster_volume_type in ['io1']:
iops = click.prompt('Specify a numeric value for iops')
if iops is None:
iops = "NA"
# Hidden facts for infrastructure.yaml
create_key = "no"
create_vpc = "no"
add_node = "yes"
node_type = "gluster"
# Display information to the user about their choices
if use_cloudformation_facts:
click.echo('Configured values:')
click.echo('\tami: %s' % ami)
click.echo('\tregion: %s' % region)
click.echo('\tgluster_stack: %s' % gluster_stack)
click.echo('\tnode_instance_type: %s' % node_instance_type)
click.echo('\tgluster_volume_type: %s' % gluster_volume_type)
click.echo('\tgluster_volume_size: %s' % gluster_volume_size)
click.echo('\tiops: %s' % iops)
click.echo('\topenshift_sdn: %s' % openshift_sdn)
click.echo('\tkeypair: %s' % keypair)
click.echo('\tdeployment_type: %s' % deployment_type)
click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
click.echo('\tconsole port: %s' % console_port)
click.echo('\trhsm_user: %s' % rhsm_user)
click.echo('\trhsm_password: *******')
click.echo('\trhsm_pool: %s' % rhsm_pool)
click.echo('\tcontainerized: %s' % containerized)
click.echo('\texisting_stack: %s' % existing_stack)
click.echo('\tSubnets, Security Groups, and IAM Roles will be gather from the CloudFormation')
click.echo("")
else:
click.echo('Configured values:')
click.echo('\tami: %s' % ami)
click.echo('\tregion: %s' % region)
click.echo('\tgluster_stack: %s' % gluster_stack)
click.echo('\tnode_instance_type: %s' % node_instance_type)
click.echo('\tprivate_subnet_id1: %s' % private_subnet_id1)
click.echo('\tprivate_subnet_id2: %s' % private_subnet_id2)
click.echo('\tprivate_subnet_id3: %s' % private_subnet_id3)
click.echo('\tgluster_volume_type: %s' % gluster_volume_type)
click.echo('\tgluster_volume_size: %s' % gluster_volume_size)
click.echo('\tiops: %s' % iops)
    click.echo('\topenshift_sdn: %s' % openshift_sdn)
    click.echo('\tkeypair: %s' % keypair)
click.echo('\tnode_sg: %s' % node_sg)
click.echo('\tdeployment_type: %s' % deployment_type)
click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
click.echo('\tconsole port: %s' % console_port)
click.echo('\trhsm_user: %s' % rhsm_user)
click.echo('\trhsm_password: *******')
click.echo('\trhsm_pool: %s' % rhsm_pool)
click.echo('\tcontainerized: %s' % containerized)
click.echo('\tiam_role: %s' % iam_role)
click.echo('\texisting_stack: %s' % existing_stack)
click.echo("")
if not no_confirm:
click.confirm('Continue using these values?', abort=True)
playbooks = ['playbooks/infrastructure.yaml', 'playbooks/add-node.yaml']
for playbook in playbooks:
# hide cache output unless in verbose mode
devnull='> /dev/null'
if verbose > 0:
devnull=''
# refresh the inventory cache to prevent stale hosts from
# interferring with re-running
command='inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
os.system(command)
# remove any cached facts to prevent stale data during a re-run
command='rm -rf .ansible/cached_facts'
os.system(command)
if use_cloudformation_facts:
command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
ami=%s \
keypair=%s \
gluster_stack=%s \
add_node=yes \
node_instance_type=%s \
public_hosted_zone=%s \
deployment_type=%s \
console_port=%s \
rhsm_user=%s \
rhsm_password=%s \
rhsm_pool="%s" \
containerized=%s \
node_type=gluster \
key_path=/dev/null \
create_key=%s \
create_vpc=%s \
gluster_volume_type=%s \
gluster_volume_size=%s \
iops=%s \
openshift_sdn=%s \
stack_name=%s \' %s' % (region,
ami,
keypair,
gluster_stack,
node_instance_type,
public_hosted_zone,
deployment_type,
console_port,
rhsm_user,
rhsm_password,
rhsm_pool,
containerized,
create_key,
create_vpc,
gluster_volume_type,
gluster_volume_size,
iops,
openshift_sdn,
existing_stack,
playbook)
else:
command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
ami=%s \
keypair=%s \
gluster_stack=%s \
add_node=yes \
node_sg=%s \
node_instance_type=%s \
private_subnet_id1=%s \
private_subnet_id2=%s \
private_subnet_id3=%s \
public_hosted_zone=%s \
deployment_type=%s \
console_port=%s \
rhsm_user=%s \
rhsm_password=%s \
rhsm_pool="%s" \
containerized=%s \
node_type=gluster \
iam_role=%s \
key_path=/dev/null \
create_key=%s \
create_vpc=%s \
gluster_volume_type=%s \
gluster_volume_size=%s \
iops=%s \
openshift_sdn=%s \
stack_name=%s \' %s' % (region,
ami,
keypair,
gluster_stack,
node_sg,
node_instance_type,
private_subnet_id1,
private_subnet_id2,
private_subnet_id3,
public_hosted_zone,
deployment_type,
console_port,
rhsm_user,
rhsm_password,
rhsm_pool,
containerized,
iam_role,
create_key,
create_vpc,
gluster_volume_type,
gluster_volume_size,
iops,
openshift_sdn,
existing_stack,
playbook)
if verbose > 0:
command += " -" + "".join(['v']*verbose)
click.echo('We are running: %s' % command)
status = os.system(command)
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
return os.WEXITSTATUS(status)
if __name__ == '__main__':
# check for AWS access info
if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print 'AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.'
sys.exit(1)
launch_refarch_env(auto_envvar_prefix='OSE_REFArch')
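# Example invocation (illustrative; all values below are placeholders):
#
#   export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=...
#   ./add-cns-storage.py --existing-stack openshift-infra \
#       --gluster-stack cns1 --keypair ose-key \
#       --public-hosted-zone example.com --rhsm-user user \
#       --rhsm-password '...' --rhsm-pool 'OpenShift Enterprise' \
#       --use-cloudformation-facts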
| apache-2.0 | 129,010,092,267,740,850 | 38.855856 | 161 | 0.588532 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-eventhub/azure/mgmt/eventhub/models/__init__.py | 2 | 2621 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
from .resource import Resource
from .sku import Sku
from .eh_namespace import EHNamespace
from .authorization_rule import AuthorizationRule
from .access_keys import AccessKeys
from .regenerate_access_key_parameters import RegenerateAccessKeyParameters
from .destination import Destination
from .capture_description import CaptureDescription
from .eventhub import Eventhub
from .consumer_group import ConsumerGroup
from .check_name_availability_parameter import CheckNameAvailabilityParameter
from .check_name_availability_result import CheckNameAvailabilityResult
from .operation_display import OperationDisplay
from .operation import Operation
from .error_response import ErrorResponse, ErrorResponseException
from .arm_disaster_recovery import ArmDisasterRecovery
from .operation_paged import OperationPaged
from .eh_namespace_paged import EHNamespacePaged
from .authorization_rule_paged import AuthorizationRulePaged
from .arm_disaster_recovery_paged import ArmDisasterRecoveryPaged
from .eventhub_paged import EventhubPaged
from .consumer_group_paged import ConsumerGroupPaged
from .event_hub_management_client_enums import (
SkuName,
SkuTier,
AccessRights,
KeyType,
EntityStatus,
EncodingCaptureDescription,
UnavailableReason,
ProvisioningStateDR,
RoleDisasterRecovery,
)
__all__ = [
'TrackedResource',
'Resource',
'Sku',
'EHNamespace',
'AuthorizationRule',
'AccessKeys',
'RegenerateAccessKeyParameters',
'Destination',
'CaptureDescription',
'Eventhub',
'ConsumerGroup',
'CheckNameAvailabilityParameter',
'CheckNameAvailabilityResult',
'OperationDisplay',
'Operation',
'ErrorResponse', 'ErrorResponseException',
'ArmDisasterRecovery',
'OperationPaged',
'EHNamespacePaged',
'AuthorizationRulePaged',
'ArmDisasterRecoveryPaged',
'EventhubPaged',
'ConsumerGroupPaged',
'SkuName',
'SkuTier',
'AccessRights',
'KeyType',
'EntityStatus',
'EncodingCaptureDescription',
'UnavailableReason',
'ProvisioningStateDR',
'RoleDisasterRecovery',
]
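# Illustrative usage (assumed typical pattern; not part of this module):
#
#   from azure.mgmt.eventhub.models import Sku, SkuName, SkuTier
#   sku = Sku(name=SkuName.standard, tier=SkuTier.standard, capacity=1)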
| mit | -387,128,429,407,252,160 | 31.7625 | 77 | 0.727966 | false |
mileswwatkins/pupa | pupa/scrape/schemas/bill.py | 2 | 4736 | """
Schema for bill objects.
"""
from .common import sources, extras, fuzzy_date_blank, fuzzy_date
from opencivicdata import common
versions_or_documents = {
"items": {
"properties": {
"note": {"type": "string"},
"date": fuzzy_date_blank,
"links": {
"items": {
"properties": {
"media_type": {"type": "string", "blank": True },
"url": {"type": "string", "format": "uri"}
},
"type": "object"
},
"type": "array",
},
},
"type": "object"
},
"type": "array",
}
schema = {
"type": "object",
"properties": {
"legislative_session": {"type": "string"},
"identifier": {"type": "string"},
"title": {"type": "string"},
"from_organization": { "type": ["string", "null"] },
"classification": {"items": {"type": "string", "enum": common.BILL_CLASSIFICATIONS},
"type": "array"},
"subject": { "items": {"type": "string"}, "type": "array"},
"abstracts": {
"items": {
"properties": {
"abstract": {"type": "string"},
"note": {"type": "string", "blank": True},
"date": {"type": "string", "blank": True},
},
"type": "object"},
"type": "array",
},
"other_titles": {
"items": {
"properties": {
"title": {"type": "string"},
"note": {"type": "string", "blank": True},
},
"type": "object"
},
"type": "array",
},
"other_identifiers": {
"items": {
"properties": {
"identifier": {"type": "string"},
"note": {"type": "string", "blank": True},
"scheme": {"type": "string", "blank": True},
},
"type": "object"
},
"type": "array",
},
"actions": {
"items": {
"properties": {
"organization": { "type": ["string", "null"] },
"date": fuzzy_date,
"description": { "type": "string" },
"classification": {"items": {"type": "string",
"enum": common.BILL_ACTION_CLASSIFICATIONS },
"type": "array",
},
"related_entities": {
"items": {
"properties": {
"name": {"type": "string"},
"entity_type": {
"enum": ["organization", "person", ""],
"type": "string", "blank": True,
},
"person_id": {"type": ["string", "null"]},
"organization_id": {"type": ["string", "null"]},
},
"type": "object"
},
"type": "array",
},
},
"type": "object"
},
"type": "array",
},
"sponsorships": {
"items": {
"properties": {
"primary": { "type": "boolean" },
"classification": { "type": "string", },
"name": {"type": "string" },
"entity_type": {
"enum": ["organization", "person", ""],
"type": "string", "blank": True,
},
"person_id": {"type": ["string", "null"] },
"organization_id": {"type": ["string", "null"] },
},
"type": "object"
},
"type": "array",
},
"related_bills": {
"items": {
"properties": {
"identifier": {"type": "string"},
"legislative_session": {"type": "string"},
"relation_type": {"enum": common.BILL_RELATION_TYPES, "type": "string"},
},
"type": "object"
},
"type": "array",
},
"versions": versions_or_documents,
"documents": versions_or_documents,
"sources": sources,
"extras": extras,
}
}
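# A document that this schema is meant to validate might look like the
# following (illustrative only; field values are made up):
#
#   {
#       "legislative_session": "2017",
#       "identifier": "HB 1",
#       "title": "An example act",
#       "classification": ["bill"],
#       "actions": [...],
#       "sponsorships": [...],
#       "sources": [{"url": "http://example.com/hb1"}],
#   }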
| bsd-3-clause | -4,360,110,510,837,721,600 | 33.823529 | 95 | 0.330448 | false |
EmmanuelJohnson/ssquiz | flask/lib/python2.7/site-packages/whoosh/query/__init__.py | 96 | 1843 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.query.qcore import *
from whoosh.query.terms import *
from whoosh.query.compound import *
from whoosh.query.positional import *
from whoosh.query.ranges import *
from whoosh.query.wrappers import *
from whoosh.query.nested import *
from whoosh.query.qcolumns import *
from whoosh.query.spans import *
| bsd-3-clause | 6,773,817,968,131,685,000 | 50.194444 | 78 | 0.781877 | false |
robhudson/django | tests/template_tests/filter_tests/test_escapejs.py | 324 | 2055 | from __future__ import unicode_literals
from django.template.defaultfilters import escapejs_filter
from django.test import SimpleTestCase
from ..utils import setup
class EscapejsTests(SimpleTestCase):
@setup({'escapejs01': '{{ a|escapejs }}'})
def test_escapejs01(self):
output = self.engine.render_to_string('escapejs01', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
'\\u0027string\\u0022 \\u003Cb\\u003E'
'escaping\\u003C/b\\u003E')
@setup({'escapejs02': '{% autoescape off %}{{ a|escapejs }}{% endautoescape %}'})
def test_escapejs02(self):
output = self.engine.render_to_string('escapejs02', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
'\\u0027string\\u0022 \\u003Cb\\u003E'
'escaping\\u003C/b\\u003E')
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
escapejs_filter('"double quotes" and \'single quotes\''),
'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027',
)
def test_backslashes(self):
self.assertEqual(escapejs_filter(r'\ : backslashes, too'), '\\u005C : backslashes, too')
def test_whitespace(self):
self.assertEqual(
escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),
'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008',
)
def test_script(self):
self.assertEqual(
escapejs_filter(r'<script>and this</script>'),
'\\u003Cscript\\u003Eand this\\u003C/script\\u003E',
)
def test_paragraph_separator(self):
self.assertEqual(
escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),
'paragraph separator:\\u2029and line separator:\\u2028',
)
| bsd-3-clause | 2,259,466,267,092,058,600 | 37.773585 | 117 | 0.601946 | false |
0111001101111010/open-health-inspection-api | venv/lib/python2.7/site-packages/pip/commands/install.py | 342 | 12694 | import os
import sys
import tempfile
import shutil
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.log import logger
from pip.locations import (src_prefix, virtualenv_no_global, distutils_scheme,
build_prefix)
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import InstallationError, CommandError, PreviousBuildDirError
from pip import cmdoptions
class InstallCommand(Command):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
bundle = False
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help='Install a project in editable mode (i.e. setuptools "develop mode") from a local project path or a VCS url.')
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>.')
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help="Download packages into <dir> instead of installing them, regardless of what's already installed.")
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".')
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all packages to the newest available version. '
'This process is recursive regardless of whether a dependency is already satisfied.')
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="DEPRECATED. Download and unpack all packages, but don't actually install them.")
cmd_opts.add_option(
'--no-download',
dest='no_download',
action="store_true",
help="DEPRECATED. Don't download any packages, just install the ones already downloaded "
"(completes an install run with --no-install).")
cmd_opts.add_option(cmdoptions.install_options.make())
cmd_opts.add_option(cmdoptions.global_options.make())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help='Install using the user scheme.')
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally does. This option is not about installing *from* eggs. (WARNING: Because this option overrides pip's normal install logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
process_dependency_links=
options.process_dependency_links,
session=session,
)
def run(self, options, args):
if (
options.no_install or
options.no_download or
(options.build_dir != build_prefix) or
options.no_clean
):
logger.deprecated('1.7', 'DEPRECATION: --no-install, --no-download, --build, '
'and --no-clean are deprecated. See https://github.com/pypa/pip/issues/906.')
if options.download_dir:
options.no_install = True
options.ignore_installed = True
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError("Can not perform a '--user' install. User site-packages are not visible in this virtualenv.")
install_options.append('--user')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir):
raise CommandError("Target path exists but is not a directory, will not continue.")
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
if options.use_mirrors:
logger.deprecated("1.7",
"--use-mirrors has been deprecated and will be removed"
" in the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
if options.mirrors:
logger.deprecated("1.7",
"--mirrors has been deprecated and will be removed in "
" the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
index_urls += options.mirrors
session = self._build_session(options)
finder = self._build_package_finder(options, index_urls, session)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
download_cache=options.download_cache,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(name, default_vcs=options.default_vcs))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
opts = {'name': self.name}
if options.find_links:
msg = ('You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.warn(msg)
return
try:
if not options.no_download:
requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
else:
requirement_set.locate_files()
if not options.no_install and not self.bundle:
requirement_set.install(install_options, global_options, root=options.root_path)
installed = ' '.join([req.name for req in
requirement_set.successfully_installed])
if installed:
logger.notify('Successfully installed %s' % installed)
elif not self.bundle:
downloaded = ' '.join([req.name for req in
requirement_set.successfully_downloaded])
if downloaded:
logger.notify('Successfully downloaded %s' % downloaded)
elif self.bundle:
requirement_set.create_bundle(self.bundle_filename)
logger.notify('Created bundle in %s' % self.bundle_filename)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if (not options.no_clean) and ((not options.no_install) or options.download_dir):
requirement_set.cleanup_files(bundle=self.bundle)
if options.target_dir:
if not os.path.exists(options.target_dir):
os.makedirs(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
shutil.move(
os.path.join(lib_dir, item),
os.path.join(options.target_dir, item)
)
shutil.rmtree(temp_target_dir)
return requirement_set
| gpl-2.0 | 4,031,533,123,472,115,700 | 39.426752 | 246 | 0.565385 | false |
glwu/python-for-android | python3-alpha/python3-src/Lib/test/test_posixpath.py | 49 | 21964 | import unittest
from test import support, test_genericpath
import posixpath
import os
import sys
from posixpath import realpath, abspath, dirname, basename
try:
import posix
except ImportError:
posix = None
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
"""
On Windows, posixpath.abspath still returns paths with backslashes
instead of posix forward slashes. If this is the case, several tests
fail, so skip them.
"""
found_backslash = '\\' in ABSTFN
msg = "ABSTFN is not a posix path - tests fail"
return [test, unittest.skip(msg)(test)][found_backslash]
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class PosixPathTest(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
support.unlink(support.TESTFN + suffix)
safe_rmdir(support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"),
"/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"),
"/foo/bar/baz/")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"/bar", b"baz"),
b"/bar/baz")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"baz"),
b"/foo/bar/baz")
self.assertEqual(posixpath.join(b"/foo/", b"bar/", b"baz/"),
b"/foo/bar/baz/")
self.assertRaises(TypeError, posixpath.join, b"bytes", "str")
self.assertRaises(TypeError, posixpath.join, "str", b"bytes")
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
self.assertEqual(posixpath.split(b"/foo/bar"), (b"/foo", b"bar"))
self.assertEqual(posixpath.split(b"/"), (b"/", b""))
self.assertEqual(posixpath.split(b"foo"), (b"", b"foo"))
self.assertEqual(posixpath.split(b"////foo"), (b"////", b"foo"))
self.assertEqual(posixpath.split(b"//foo//bar"), (b"//foo", b"bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path),
("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path),
("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path),
("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"),
(filename + ext + "/", ""))
path = bytes(path, "ASCII")
filename = bytes(filename, "ASCII")
ext = bytes(ext, "ASCII")
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext(b"/" + path),
(b"/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc/" + path),
(b"abc/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc.def/" + path),
(b"abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
(b"/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + b"/"),
(filename + ext + b"/", b""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
self.assertIs(posixpath.isabs(b""), False)
self.assertIs(posixpath.isabs(b"/"), True)
self.assertIs(posixpath.isabs(b"/foo"), True)
self.assertIs(posixpath.isabs(b"/foo/bar"), True)
self.assertIs(posixpath.isabs(b"foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
self.assertEqual(posixpath.basename(b"/foo/bar"), b"bar")
self.assertEqual(posixpath.basename(b"/"), b"")
self.assertEqual(posixpath.basename(b"foo"), b"foo")
self.assertEqual(posixpath.basename(b"////foo"), b"foo")
self.assertEqual(posixpath.basename(b"//foo//bar"), b"bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
self.assertEqual(posixpath.dirname(b"/foo/bar"), b"/foo")
self.assertEqual(posixpath.dirname(b"/"), b"/")
self.assertEqual(posixpath.dirname(b"foo"), b"")
self.assertEqual(posixpath.dirname(b"////foo"), b"////")
self.assertEqual(posixpath.dirname(b"//foo//bar"), b"//foo")
def test_islink(self):
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), False)
f = open(support.TESTFN + "1", "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
if support.can_symlink():
os.symlink(support.TESTFN + "1", support.TESTFN + "2")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
os.remove(support.TESTFN + "1")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), True)
finally:
if not f.close():
f.close()
@staticmethod
def _create_file(filename):
with open(filename, 'wb') as f:
f.write(b'foo')
def test_samefile(self):
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
self.assertTrue(posixpath.samefile(test_fn, test_fn))
self.assertRaises(TypeError, posixpath.samefile)
@unittest.skipIf(
sys.platform.startswith('win'),
"posixpath.samefile does not work on links in Windows")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
def test_samefile_on_links(self):
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
os.symlink(test_fn1, test_fn2)
self.assertTrue(posixpath.samefile(test_fn1, test_fn2))
os.remove(test_fn2)
self._create_file(test_fn2)
self.assertFalse(posixpath.samefile(test_fn1, test_fn2))
def test_samestat(self):
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
test_fns = [test_fn]*2
stats = map(os.stat, test_fns)
self.assertTrue(posixpath.samestat(*stats))
@unittest.skipIf(
sys.platform.startswith('win'),
"posixpath.samestat does not work on links in Windows")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
def test_samestat_on_links(self):
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
test_fns = (test_fn1, test_fn2)
os.symlink(*test_fns)
stats = map(os.stat, test_fns)
self.assertTrue(posixpath.samestat(*stats))
os.remove(test_fn2)
self._create_file(test_fn2)
stats = map(os.stat, test_fns)
self.assertFalse(posixpath.samestat(*stats))
self.assertRaises(TypeError, posixpath.samestat)
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
self.assertIs(posixpath.ismount(b"/"), True)
def test_ismount_non_existent(self):
# Non-existent mountpoint.
self.assertIs(posixpath.ismount(ABSTFN), False)
try:
os.mkdir(ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
safe_rmdir(ABSTFN)
@unittest.skipUnless(support.can_symlink(),
"Test requires symlink support")
def test_ismount_symlinks(self):
# Symlinks are never mountpoints.
try:
os.symlink("/", ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
os.unlink(ABSTFN)
@unittest.skipIf(posix is None, "Test requires posix module")
def test_ismount_different_device(self):
# Simulate the path being on a different device from its parent by
# mocking out st_dev.
save_lstat = os.lstat
def fake_lstat(path):
st_ino = 0
st_dev = 0
if path == ABSTFN:
st_dev = 1
st_ino = 1
return posix.stat_result((0, st_ino, st_dev, 0, 0, 0, 0, 0, 0, 0))
try:
os.lstat = fake_lstat
self.assertIs(posixpath.ismount(ABSTFN), True)
finally:
os.lstat = save_lstat
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
self.assertEqual(posixpath.expanduser(b"foo"), b"foo")
try:
import pwd
except ImportError:
pass
else:
self.assertIsInstance(posixpath.expanduser("~/"), str)
self.assertIsInstance(posixpath.expanduser(b"~/"), bytes)
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assertEqual(
posixpath.expanduser(b"~") + b"/",
posixpath.expanduser(b"~/")
)
self.assertIsInstance(posixpath.expanduser("~root/"), str)
self.assertIsInstance(posixpath.expanduser("~foo/"), str)
self.assertIsInstance(posixpath.expanduser(b"~root/"), bytes)
self.assertIsInstance(posixpath.expanduser(b"~foo/"), bytes)
with support.EnvironmentVarGuard() as env:
env['HOME'] = '/'
self.assertEqual(posixpath.expanduser("~"), "/")
# expanduser should fall back to using the password database
del env['HOME']
home = pwd.getpwuid(os.getuid()).pw_dir
self.assertEqual(posixpath.expanduser("~"), home)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"),
"/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
self.assertEqual(posixpath.normpath(b""), b".")
self.assertEqual(posixpath.normpath(b"/"), b"/")
self.assertEqual(posixpath.normpath(b"//"), b"//")
self.assertEqual(posixpath.normpath(b"///"), b"/")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//"), b"/foo/bar")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//.//..//.//baz"),
b"/foo/baz")
self.assertEqual(posixpath.normpath(b"///..//./foo/.//bar"),
b"/foo/bar")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
support.unlink(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_relative(self):
try:
os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
support.unlink(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
old_path = abspath('.')
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
os.chdir(old_path)
support.unlink(ABSTFN)
support.unlink(ABSTFN+"1")
support.unlink(ABSTFN+"2")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
# realpath("a"). This should return /usr/share/doc/a/.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/y")
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
os.chdir(ABSTFN + "/k")
self.assertEqual(realpath("a"), ABSTFN + "/y/a")
finally:
os.chdir(old_path)
support.unlink(ABSTFN + "/k")
safe_rmdir(ABSTFN + "/y")
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_before_normalizing(self):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
# a/k/y
#
# and a symbolic link 'link-y' pointing to 'y' in directory 'a',
# then realpath("link-y/..") should return 'k', not 'a'.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.mkdir(ABSTFN + "/k/y")
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
# Relative path.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
ABSTFN + "/k")
finally:
os.chdir(old_path)
support.unlink(ABSTFN + "/link-y")
safe_rmdir(ABSTFN + "/k/y")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_first(self):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.symlink(ABSTFN, ABSTFN + "link")
os.chdir(dirname(ABSTFN))
base = basename(ABSTFN)
self.assertEqual(realpath(base + "link"), ABSTFN)
self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
finally:
os.chdir(old_path)
support.unlink(ABSTFN + "link")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"),
"../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
def test_relpath_bytes(self):
(real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
try:
curdir = os.path.split(os.getcwdb())[-1]
self.assertRaises(ValueError, posixpath.relpath, b"")
self.assertEqual(posixpath.relpath(b"a"), b"a")
self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
self.assertEqual(posixpath.relpath(b"a", b"../b"),
b"../"+curdir+b"/a")
self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
b"../"+curdir+b"/a/b")
self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')
self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
finally:
os.getcwdb = real_getcwdb
def test_sameopenfile(self):
fname = support.TESTFN + "1"
with open(fname, "wb") as a, open(fname, "wb") as b:
self.assertTrue(posixpath.sameopenfile(a.fileno(), b.fileno()))
class PosixCommonTest(test_genericpath.CommonTest):
pathmodule = posixpath
attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
def test_main():
support.run_unittest(PosixPathTest, PosixCommonTest)
if __name__=="__main__":
test_main()
| apache-2.0 | -7,113,016,781,189,107,000 | 41.238462 | 100 | 0.558414 | false |
tysonclugg/django | django/contrib/gis/gdal/srs.py | 72 | 11540 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
destructor = capi.release_srs
def __init__(self, srs_input='', srs_type='user'):
"""
Create a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, str):
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, int):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __getitem__(self, target):
"""
Return the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, you have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"Use 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, str) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Return the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Return the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Return a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morph this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morph this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Check to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Return the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Return the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Return the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Return the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Return the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Return the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Return a 2-tuple of the units value and the units name. Automatically
determine whether to return the linear or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Return a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Return the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Return the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Return the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
# #### Boolean Properties ####
@property
def geographic(self):
"""
Return True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Return True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Return True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
# #### Import Routines #####
def import_epsg(self, epsg):
"Import the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Import the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Import the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Import the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(force_bytes(wkt))))
def import_xml(self, xml):
"Import the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
# #### Export Properties ####
@property
def wkt(self):
"Return the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Return the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Return the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Return the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), force_bytes(dialect))
class CoordTransform(GDALBase):
"The coordinate system transformation object."
destructor = capi.destroy_ct
def __init__(self, source, target):
"Initialize on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
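# Usage sketch (illustrative, not part of the original module): build a
# transform between two spatial references and apply it to a GEOS geometry.
# The EPSG codes and coordinates are arbitrary examples.
def _example_coord_transform():
    from django.contrib.gis.geos import Point
    wgs84 = SpatialReference(4326)     # geographic lat/long
    texas = SpatialReference(32140)    # NAD83 / Texas South Central (projected)
    ct = CoordTransform(wgs84, texas)
    pt = Point(-95.36, 29.76, srid=4326)
    pt.transform(ct)                   # coordinates are now in the target SRS
    return pt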
| bsd-3-clause | 1,420,262,175,175,315,700 | 33.447761 | 97 | 0.603553 | false |
arjclark/cylc | lib/jinja2/__init__.py | 71 | 2614 | # -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.10'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
make_logging_undefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError, TemplateRuntimeError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined, select_autoescape
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'TemplateRuntimeError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
'select_autoescape',
]
def _patch_async():
from jinja2.utils import have_async_gen
if have_async_gen:
from jinja2.asyncsupport import patch_all
patch_all()
_patch_async()
del _patch_async
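# Minimal usage sketch (illustrative, not part of the original module): render
# a template from a plain string using the Template class re-exported above.
def _example_render():
    template = Template("Hello {{ name }}!")
    return template.render(name="World")  # -> 'Hello World!'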
| gpl-3.0 | -4,608,557,508,708,200,000 | 30.493976 | 77 | 0.692425 | false |
JudoWill/ResearchNotebooks | DownloadMicroData.py | 1 | 1738 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os, os.path
import concurrent.futures
import csv
import urllib.request
import shutil
import gzip
os.chdir('/home/will/AutoMicroAnal/')
# <codecell>
with open('MicroarraySamples.tsv') as handle:
microdata = list(csv.DictReader(handle, delimiter = '\t'))
# <codecell>
def get_fileurl(supurl):
    """Return the URL of the first gzipped CEL file in a GEO suppl/ listing, or None."""
    resp = urllib.request.urlopen(supurl)
    for line in resp:
        # Listing lines are bytes; the file name is the last whitespace-separated field.
        fname = line.split()[-1].decode()
        if fname.lower().endswith('.cel.gz'):
            return supurl + fname
    return None
def process_row(row):
supurl = row['URL'] + 'suppl/'
tmpfile = '/tmp/' + row['Sample Accession'] + '.CEL.gz'
finalfile = '/home/will/AutoMicroAnal/microadata/' + row['Sample Accession'] + '.CEL'
if os.path.exists(finalfile):
return None
fileurl = get_fileurl(supurl)
#print(fileurl)
if fileurl is None:
return fileurl
try:
resp = urllib.request.urlopen(fileurl)
with open(tmpfile, 'wb') as handle:
handle.write(resp.read())
except urllib.request.URLError:
return fileurl
with gzip.open(tmpfile) as zhandle:
with open(finalfile, 'wb') as handle:
handle.write(zhandle.read())
os.remove(tmpfile)
return None
# <codecell>
gp133As = [row for row in microdata if row['Platform'] == 'GPL96']
# <codecell>
for num, row in enumerate(gp133As):
    try:
        res = process_row(row)
    except Exception:
        # Skip samples whose supplementary files cannot be downloaded.
        print('skipping')
        continue
    # Print occasional progress markers.
    if (num == 0) or (num == 5) or (num == 20) or (num % 500 == 0):
        print(num)
    if res:
        print(res)
# <codecell>
# <codecell>
| mit | 2,532,407,197,985,833,000 | 21.571429 | 89 | 0.598389 | false |
collex100/odoo | addons/account_check_writing/account.py | 379 | 2032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_journal(osv.osv):
_inherit = "account.journal"
_columns = {
'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'),
        'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check this if you use a preformatted sheet for checks'),
}
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'check_layout': fields.selection([
('top', 'Check on Top'),
('middle', 'Check in middle'),
('bottom', 'Check on bottom'),
],"Check Layout",
help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ),
}
_defaults = {
'check_layout' : lambda *a: 'top',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,210,801,479,088,206,000 | 41.333333 | 231 | 0.613189 | false |
iemejia/incubator-beam | sdks/python/apache_beam/runners/portability/flink_uber_jar_job_server.py | 1 | 8979 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A job server submitting portable pipelines as uber jars to Flink."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import time
import urllib
import requests
from google.protobuf import json_format
from apache_beam.options import pipeline_options
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import job_server
_LOGGER = logging.getLogger(__name__)
class FlinkUberJarJobServer(abstract_job_service.AbstractJobServiceServicer):
"""A Job server which submits a self-contained Jar to a Flink cluster.
The jar contains the Beam pipeline definition, dependencies, and
the pipeline artifacts.
"""
def __init__(self, master_url, options):
super(FlinkUberJarJobServer, self).__init__()
self._master_url = master_url
self._executable_jar = (
options.view_as(
pipeline_options.FlinkRunnerOptions).flink_job_server_jar)
self._artifact_port = (
options.view_as(pipeline_options.JobServerOptions).artifact_port)
self._temp_dir = tempfile.mkdtemp(prefix='apache-beam-flink')
def start(self):
return self
def stop(self):
pass
def executable_jar(self):
if self._executable_jar:
if not os.path.exists(self._executable_jar):
parsed = urllib.parse.urlparse(self._executable_jar)
if not parsed.scheme:
raise ValueError(
'Unable to parse jar URL "%s". If using a full URL, make sure '
'the scheme is specified. If using a local file path, make sure '
'the file exists; you may have to first build the job server '
'using `./gradlew runners:flink:%s:job-server:shadowJar`.' %
              (self._executable_jar, self.flink_version()))
url = self._executable_jar
else:
url = job_server.JavaJarJobServer.path_to_beam_jar(
'runners:flink:%s:job-server:shadowJar' % self.flink_version())
return job_server.JavaJarJobServer.local_jar(url)
def flink_version(self):
full_version = requests.get('%s/v1/config' %
self._master_url).json()['flink-version']
# Only return up to minor version.
return '.'.join(full_version.split('.')[:2])
def create_beam_job(self, job_id, job_name, pipeline, options):
return FlinkBeamJob(
self._master_url,
self.executable_jar(),
job_id,
job_name,
pipeline,
options,
artifact_port=self._artifact_port)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
metrics_text = self._jobs[request.job_id].get_metrics()
response = beam_job_api_pb2.GetJobMetricsResponse()
json_format.Parse(metrics_text, response)
return response
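# Usage sketch (illustrative, not part of the original module): stand up a job
# server against a local Flink REST endpoint. The master URL and jar path are
# placeholders.
def _example_job_server():
  options = pipeline_options.PipelineOptions()
  options.view_as(pipeline_options.FlinkRunnerOptions).flink_job_server_jar = (
      '/path/to/beam-runners-flink-job-server.jar')
  server = FlinkUberJarJobServer('http://localhost:8081', options)
  return server.start()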
class FlinkBeamJob(abstract_job_service.UberJarBeamJob):
"""Runs a single Beam job on Flink by staging all contents into a Jar
  and uploading it via the Flink REST API."""
def __init__(
self,
master_url,
executable_jar,
job_id,
job_name,
pipeline,
options,
artifact_port=0):
super(FlinkBeamJob, self).__init__(
executable_jar,
job_id,
job_name,
pipeline,
options,
artifact_port=artifact_port)
self._master_url = master_url
def request(self, method, path, expected_status=200, **kwargs):
url = '%s/%s' % (self._master_url, path)
response = method(url, **kwargs)
if response.status_code != expected_status:
raise RuntimeError(
"Request to %s failed with status %d: %s" %
(url, response.status_code, response.text))
if response.text:
return response.json()
def get(self, path, **kwargs):
return self.request(requests.get, path, **kwargs)
def post(self, path, **kwargs):
return self.request(requests.post, path, **kwargs)
def delete(self, path, **kwargs):
return self.request(requests.delete, path, **kwargs)
def run(self):
self._stop_artifact_service()
# Upload the jar and start the job.
with open(self._jar, 'rb') as jar_file:
self._flink_jar_id = self.post(
'v1/jars/upload',
files={'jarfile': ('beam.jar', jar_file)})['filename'].split('/')[-1]
self._jar_uploaded = True
self._flink_job_id = self.post(
'v1/jars/%s/run' % self._flink_jar_id,
json={
'entryClass': 'org.apache.beam.runners.flink.FlinkPipelineRunner'
})['jobid']
os.unlink(self._jar)
_LOGGER.info('Started Flink job as %s' % self._flink_job_id)
def cancel(self):
self.post('v1/%s/stop' % self._flink_job_id, expected_status=202)
self.delete_jar()
def delete_jar(self):
if self._jar_uploaded:
self._jar_uploaded = False
try:
self.delete('v1/jars/%s' % self._flink_jar_id)
except Exception:
_LOGGER.info(
'Error deleting jar %s' % self._flink_jar_id, exc_info=True)
def _get_state(self):
"""Query flink to get the current state.
:return: tuple of int and Timestamp or None
timestamp will be None if the state has not changed since the last query.
"""
# For just getting the status, execution-result seems cheaper.
flink_status = self.get('v1/jobs/%s/execution-result' %
self._flink_job_id)['status']['id']
if flink_status == 'COMPLETED':
flink_status = self.get('v1/jobs/%s' % self._flink_job_id)['state']
beam_state = {
'CREATED': beam_job_api_pb2.JobState.STARTING,
'RUNNING': beam_job_api_pb2.JobState.RUNNING,
'FAILING': beam_job_api_pb2.JobState.RUNNING,
'FAILED': beam_job_api_pb2.JobState.FAILED,
'CANCELLING': beam_job_api_pb2.JobState.CANCELLING,
'CANCELED': beam_job_api_pb2.JobState.CANCELLED,
'FINISHED': beam_job_api_pb2.JobState.DONE,
'RESTARTING': beam_job_api_pb2.JobState.RUNNING,
'SUSPENDED': beam_job_api_pb2.JobState.RUNNING,
'RECONCILING': beam_job_api_pb2.JobState.RUNNING,
'IN_PROGRESS': beam_job_api_pb2.JobState.RUNNING,
'COMPLETED': beam_job_api_pb2.JobState.DONE,
}.get(flink_status, beam_job_api_pb2.JobState.UNSPECIFIED)
if self.is_terminal_state(beam_state):
self.delete_jar()
# update the state history if it has changed
return beam_state, self.set_state(beam_state)
def get_state(self):
state, timestamp = self._get_state()
if timestamp is None:
# state has not changed since it was last checked: use previous timestamp
return super(FlinkBeamJob, self).get_state()
else:
return state, timestamp
def get_state_stream(self):
def _state_iter():
sleep_secs = 1.0
while True:
yield self.get_state()
sleep_secs = min(60, sleep_secs * 1.2)
time.sleep(sleep_secs)
for state, timestamp in self.with_state_history(_state_iter()):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
for state, timestamp in self.get_state_stream():
if self.is_terminal_state(state):
response = self.get('v1/jobs/%s/exceptions' % self._flink_job_id)
for ix, exc in enumerate(response['all-exceptions']):
yield beam_job_api_pb2.JobMessage(
message_id='message%d' % ix,
time=str(exc['timestamp']),
importance=beam_job_api_pb2.JobMessage.MessageImportance.
JOB_MESSAGE_ERROR,
message_text=exc['exception'])
yield state, timestamp
break
else:
yield state, timestamp
def get_metrics(self):
accumulators = self.get('v1/jobs/%s/accumulators' %
self._flink_job_id)['user-task-accumulators']
for accumulator in accumulators:
if accumulator['name'] == '__metricscontainers':
return accumulator['value']
raise LookupError(
"Found no metrics container for job {}".format(self._flink_job_id))
| apache-2.0 | 7,720,481,441,974,997,000 | 34.916 | 79 | 0.650741 | false |
roxyboy/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause | -6,615,838,260,987,691,000 | 27.598726 | 75 | 0.636526 | false |
coderfi/ansible-modules-extras | packaging/macports.py | 61 | 6679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jimmy Tang <[email protected]>
# Based on opkg (Patrick Pelletier <[email protected]>), pacman
# (Afterburn) and pkgin (Shaun Zinck) modules
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: macports
author: Jimmy Tang
short_description: Package manager for MacPorts
description:
- Manages MacPorts packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent', 'active', 'inactive' ]
required: false
default: present
update_cache:
description:
- update the package db first
required: false
default: "no"
choices: [ "yes", "no" ]
notes: []
'''
EXAMPLES = '''
- macports: name=foo state=present
- macports: name=foo state=present update_cache=yes
- macports: name=foo state=absent
- macports: name=foo state=active
- macports: name=foo state=inactive
'''
import pipes
def update_package_db(module, port_path):
""" Updates packages list. """
rc, out, err = module.run_command("%s sync" % port_path)
if rc != 0:
module.fail_json(msg="could not update package db")
def query_package(module, port_path, name, state="present"):
""" Returns whether a package is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
elif state == "active":
rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
def remove_packages(module, port_path, packages):
""" Uninstalls one or more packages if installed. """
remove_c = 0
    # Use a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, port_path, package):
continue
rc, out, err = module.run_command("%s uninstall %s" % (port_path, package))
if query_package(module, port_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, port_path, packages):
""" Installs one or more packages if not already installed. """
install_c = 0
for package in packages:
if query_package(module, port_path, package):
continue
rc, out, err = module.run_command("%s install %s" % (port_path, package))
if not query_package(module, port_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def activate_packages(module, port_path, packages):
""" Activate a package if it's inactive. """
activate_c = 0
for package in packages:
if not query_package(module, port_path, package):
module.fail_json(msg="failed to activate %s, package(s) not present" % (package))
if query_package(module, port_path, package, state="active"):
continue
rc, out, err = module.run_command("%s activate %s" % (port_path, package))
if not query_package(module, port_path, package, state="active"):
module.fail_json(msg="failed to activate %s: %s" % (package, out))
activate_c += 1
if activate_c > 0:
module.exit_json(changed=True, msg="activated %s package(s)" % (activate_c))
module.exit_json(changed=False, msg="package(s) already active")
def deactivate_packages(module, port_path, packages):
""" Deactivate a package if it's active. """
deactivated_c = 0
for package in packages:
if not query_package(module, port_path, package):
module.fail_json(msg="failed to activate %s, package(s) not present" % (package))
if not query_package(module, port_path, package, state="active"):
continue
rc, out, err = module.run_command("%s deactivate %s" % (port_path, package))
if query_package(module, port_path, package, state="active"):
module.fail_json(msg="failed to deactivated %s: %s" % (package, out))
deactivated_c += 1
if deactivated_c > 0:
module.exit_json(changed=True, msg="deactivated %s package(s)" % (deactivated_c))
module.exit_json(changed=False, msg="package(s) already inactive")
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=["pkg"], required=True),
state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
update_cache = dict(default="no", aliases=["update-cache"], type='bool')
)
)
port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
p = module.params
if p["update_cache"]:
update_package_db(module, port_path)
pkgs = p["name"].split(",")
if p["state"] in ["present", "installed"]:
install_packages(module, port_path, pkgs)
elif p["state"] in ["absent", "removed"]:
remove_packages(module, port_path, pkgs)
elif p["state"] == "active":
activate_packages(module, port_path, pkgs)
elif p["state"] == "inactive":
deactivate_packages(module, port_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -5,207,258,688,369,111,000 | 29.778802 | 146 | 0.630034 | false |
ShawnPengxy/Flask-madeBlog | site-packages/httpie/output.py | 5 | 16098 | """Output streaming, processing and formatting.
"""
import json
import xml.dom.minidom
from functools import partial
from itertools import chain
import pygments
from pygments import token, lexer
from pygments.styles import get_style_by_name, STYLE_MAP
from pygments.lexers import get_lexer_for_mimetype, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.util import ClassNotFound
from .compat import is_windows
from .solarized import Solarized256Style
from .models import HTTPRequest, HTTPResponse, Environment
from .input import (OUT_REQ_BODY, OUT_REQ_HEAD,
OUT_RESP_HEAD, OUT_RESP_BODY)
# The default number of spaces to indent when pretty printing
DEFAULT_INDENT = 4
# Colors on Windows via colorama don't look that
# great and fruity seems to give the best result there.
AVAILABLE_STYLES = set(STYLE_MAP.keys())
AVAILABLE_STYLES.add('solarized')
DEFAULT_STYLE = 'solarized' if not is_windows else 'fruity'
BINARY_SUPPRESSED_NOTICE = (
b'\n'
b'+-----------------------------------------+\n'
b'| NOTE: binary data not shown in terminal |\n'
b'+-----------------------------------------+'
)
class BinarySuppressedError(Exception):
"""An error indicating that the body is binary and won't be written,
    e.g., for terminal output."""
message = BINARY_SUPPRESSED_NOTICE
###############################################################################
# Output Streams
###############################################################################
def write(stream, outfile, flush):
"""Write the output stream."""
try:
# Writing bytes so we use the buffer interface (Python 3).
buf = outfile.buffer
except AttributeError:
buf = outfile
for chunk in stream:
buf.write(chunk)
if flush:
outfile.flush()
def write_with_colors_win_py3(stream, outfile, flush):
"""Like `write`, but colorized chunks are written as text
directly to `outfile` to ensure it gets processed by colorama.
Applies only to Windows with Python 3 and colorized terminal output.
"""
color = b'\x1b['
encoding = outfile.encoding
for chunk in stream:
if color in chunk:
outfile.write(chunk.decode(encoding))
else:
outfile.buffer.write(chunk)
if flush:
outfile.flush()
def build_output_stream(args, env, request, response):
"""Build and return a chain of iterators over the `request`-`response`
exchange each of which yields `bytes` chunks.
"""
req_h = OUT_REQ_HEAD in args.output_options
req_b = OUT_REQ_BODY in args.output_options
resp_h = OUT_RESP_HEAD in args.output_options
resp_b = OUT_RESP_BODY in args.output_options
req = req_h or req_b
resp = resp_h or resp_b
output = []
Stream = get_stream_type(env, args)
if req:
output.append(Stream(
msg=HTTPRequest(request),
with_headers=req_h,
with_body=req_b))
if req_b and resp:
# Request/Response separator.
output.append([b'\n\n'])
if resp:
output.append(Stream(
msg=HTTPResponse(response),
with_headers=resp_h,
with_body=resp_b))
if env.stdout_isatty and resp_b:
# Ensure a blank line after the response body.
# For terminal output only.
output.append([b'\n\n'])
return chain(*output)
def get_stream_type(env, args):
"""Pick the right stream type based on `env` and `args`.
Wrap it in a partial with the type-specific args so that
we don't need to think what stream we are dealing with.
"""
if not env.stdout_isatty and not args.prettify:
Stream = partial(
RawStream,
chunk_size=RawStream.CHUNK_SIZE_BY_LINE
if args.stream
else RawStream.CHUNK_SIZE
)
elif args.prettify:
Stream = partial(
PrettyStream if args.stream else BufferedPrettyStream,
env=env,
processor=OutputProcessor(
env=env, groups=args.prettify, pygments_style=args.style),
)
else:
Stream = partial(EncodedStream, env=env)
return Stream
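# Illustrative summary (added commentary, not part of the original module): the
# branches above map runtime state to stream classes roughly as follows.
#
#   stdout is a tty?   args.prettify?   args.stream?   resulting stream class
#   no                 falsy            any            RawStream (line- or block-sized chunks)
#   any                truthy           truthy         PrettyStream
#   any                truthy           falsy          BufferedPrettyStream
#   yes                falsy            any            EncodedStream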
class BaseStream(object):
"""Base HTTP message output stream class."""
def __init__(self, msg, with_headers=True, with_body=True,
on_body_chunk_downloaded=None):
"""
:param msg: a :class:`models.HTTPMessage` subclass
:param with_headers: if `True`, headers will be included
:param with_body: if `True`, body will be included
"""
assert with_headers or with_body
self.msg = msg
self.with_headers = with_headers
self.with_body = with_body
self.on_body_chunk_downloaded = on_body_chunk_downloaded
def _get_headers(self):
"""Return the headers' bytes."""
return self.msg.headers.encode('ascii')
def _iter_body(self):
"""Return an iterator over the message body."""
raise NotImplementedError()
def __iter__(self):
"""Return an iterator over `self.msg`."""
if self.with_headers:
yield self._get_headers()
yield b'\r\n\r\n'
if self.with_body:
try:
for chunk in self._iter_body():
yield chunk
if self.on_body_chunk_downloaded:
self.on_body_chunk_downloaded(chunk)
except BinarySuppressedError as e:
if self.with_headers:
yield b'\n'
yield e.message
class RawStream(BaseStream):
"""The message is streamed in chunks with no processing."""
CHUNK_SIZE = 1024 * 100
CHUNK_SIZE_BY_LINE = 1
def __init__(self, chunk_size=CHUNK_SIZE, **kwargs):
super(RawStream, self).__init__(**kwargs)
self.chunk_size = chunk_size
def _iter_body(self):
return self.msg.iter_body(self.chunk_size)
class EncodedStream(BaseStream):
"""Encoded HTTP message stream.
The message bytes are converted to an encoding suitable for
`self.env.stdout`. Unicode errors are replaced and binary data
is suppressed. The body is always streamed by line.
"""
CHUNK_SIZE = 1
def __init__(self, env=Environment(), **kwargs):
super(EncodedStream, self).__init__(**kwargs)
if env.stdout_isatty:
# Use the encoding supported by the terminal.
output_encoding = getattr(env.stdout, 'encoding', None)
else:
# Preserve the message encoding.
output_encoding = self.msg.encoding
# Default to utf8 when unsure.
self.output_encoding = output_encoding or 'utf8'
def _iter_body(self):
for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
if b'\0' in line:
raise BinarySuppressedError()
yield line.decode(self.msg.encoding)\
.encode(self.output_encoding, 'replace') + lf
class PrettyStream(EncodedStream):
"""In addition to :class:`EncodedStream` behaviour, this stream applies
content processing.
Useful for long-lived HTTP responses that stream by lines
such as the Twitter streaming API.
"""
CHUNK_SIZE = 1
def __init__(self, processor, **kwargs):
super(PrettyStream, self).__init__(**kwargs)
self.processor = processor
def _get_headers(self):
return self.processor.process_headers(
self.msg.headers).encode(self.output_encoding)
def _iter_body(self):
for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
if b'\0' in line:
raise BinarySuppressedError()
yield self._process_body(line) + lf
def _process_body(self, chunk):
return (self.processor
.process_body(
content=chunk.decode(self.msg.encoding, 'replace'),
content_type=self.msg.content_type,
encoding=self.msg.encoding)
.encode(self.output_encoding, 'replace'))
class BufferedPrettyStream(PrettyStream):
"""The same as :class:`PrettyStream` except that the body is fully
fetched before it's processed.
    Suitable for regular HTTP responses.
"""
CHUNK_SIZE = 1024 * 10
def _iter_body(self):
# Read the whole body before prettifying it,
# but bail out immediately if the body is binary.
body = bytearray()
for chunk in self.msg.iter_body(self.CHUNK_SIZE):
if b'\0' in chunk:
raise BinarySuppressedError()
body.extend(chunk)
yield self._process_body(body)
###############################################################################
# Processing
###############################################################################
class HTTPLexer(lexer.RegexLexer):
"""Simplified HTTP lexer for Pygments.
It only operates on headers and provides a stronger contrast between
their names and values than the original one bundled with Pygments
    (:class:`pygments.lexers.text.HttpLexer`), especially when
    the Solarized color scheme is used.
"""
name = 'HTTP'
aliases = ['http']
filenames = ['*.http']
tokens = {
'root': [
# Request-Line
(r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)',
lexer.bygroups(
token.Name.Function,
token.Text,
token.Name.Namespace,
token.Text,
token.Keyword.Reserved,
token.Operator,
token.Number
)),
# Response Status-Line
(r'(HTTP)(/)(\d+\.\d+)( +)(\d{3})( +)(.+)',
lexer.bygroups(
token.Keyword.Reserved, # 'HTTP'
token.Operator, # '/'
token.Number, # Version
token.Text,
token.Number, # Status code
token.Text,
token.Name.Exception, # Reason
)),
# Header
(r'(.*?)( *)(:)( *)(.+)', lexer.bygroups(
token.Name.Attribute, # Name
token.Text,
token.Operator, # Colon
token.Text,
token.String # Value
))
]
}
class BaseProcessor(object):
"""Base, noop output processor class."""
enabled = True
def __init__(self, env=Environment(), **kwargs):
"""
:param env: an class:`Environment` instance
        :param kwargs: additional keyword arguments that some
                       processors might require.
"""
self.env = env
self.kwargs = kwargs
def process_headers(self, headers):
"""Return processed `headers`
:param headers: The headers as text.
"""
return headers
def process_body(self, content, content_type, subtype, encoding):
"""Return processed `content`.
:param content: The body content as text
:param content_type: Full content type, e.g., 'application/atom+xml'.
:param subtype: E.g. 'xml'.
:param encoding: The original content encoding.
"""
return content
class JSONProcessor(BaseProcessor):
"""JSON body processor."""
def process_body(self, content, content_type, subtype, encoding):
if subtype == 'json':
try:
# Indent the JSON data, sort keys by name, and
# avoid unicode escapes to improve readability.
content = json.dumps(json.loads(content),
sort_keys=True,
ensure_ascii=False,
indent=DEFAULT_INDENT)
except ValueError:
# Invalid JSON but we don't care.
pass
return content
class XMLProcessor(BaseProcessor):
"""XML body processor."""
# TODO: tests
def process_body(self, content, content_type, subtype, encoding):
if subtype == 'xml':
try:
# Pretty print the XML
doc = xml.dom.minidom.parseString(content.encode(encoding))
content = doc.toprettyxml(indent=' ' * DEFAULT_INDENT)
except xml.parsers.expat.ExpatError:
# Ignore invalid XML errors (skips attempting to pretty print)
pass
return content
class PygmentsProcessor(BaseProcessor):
"""A processor that applies syntax-highlighting using Pygments
to the headers, and to the body as well if its content type is recognized.
"""
def __init__(self, *args, **kwargs):
super(PygmentsProcessor, self).__init__(*args, **kwargs)
# Cache that speeds up when we process streamed body by line.
self.lexers_by_type = {}
if not self.env.colors:
self.enabled = False
return
try:
style = get_style_by_name(
self.kwargs.get('pygments_style', DEFAULT_STYLE))
except ClassNotFound:
style = Solarized256Style
if self.env.is_windows or self.env.colors == 256:
fmt_class = Terminal256Formatter
else:
fmt_class = TerminalFormatter
self.formatter = fmt_class(style=style)
def process_headers(self, headers):
return pygments.highlight(
headers, HTTPLexer(), self.formatter).strip()
def process_body(self, content, content_type, subtype, encoding):
try:
lexer = self.lexers_by_type.get(content_type)
if not lexer:
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
lexer = get_lexer_by_name(subtype)
self.lexers_by_type[content_type] = lexer
except ClassNotFound:
pass
else:
content = pygments.highlight(content, lexer, self.formatter)
return content.strip()
class HeadersProcessor(BaseProcessor):
"""Sorts headers by name retaining relative order of multiple headers
with the same name.
"""
def process_headers(self, headers):
lines = headers.splitlines()
headers = sorted(lines[1:], key=lambda h: h.split(':')[0])
return '\r\n'.join(lines[:1] + headers)
class OutputProcessor(object):
"""A delegate class that invokes the actual processors."""
installed_processors = {
'format': [
HeadersProcessor,
JSONProcessor,
XMLProcessor
],
'colors': [
PygmentsProcessor
]
}
def __init__(self, groups, env=Environment(), **kwargs):
"""
:param env: a :class:`models.Environment` instance
:param groups: the groups of processors to be applied
:param kwargs: additional keyword arguments for processors
"""
self.processors = []
for group in groups:
for cls in self.installed_processors[group]:
processor = cls(env, **kwargs)
if processor.enabled:
self.processors.append(processor)
def process_headers(self, headers):
for processor in self.processors:
headers = processor.process_headers(headers)
return headers
def process_body(self, content, content_type, encoding):
# e.g., 'application/atom+xml'
content_type = content_type.split(';')[0]
# e.g., 'xml'
subtype = content_type.split('/')[-1].split('+')[-1]
for processor in self.processors:
content = processor.process_body(
content,
content_type,
subtype,
encoding
)
return content
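# Illustrative usage sketch (added commentary, not part of the original module);
# the JSON string below is a hypothetical response body:
#
#   processor = OutputProcessor(groups=['format'], env=Environment())
#   pretty = processor.process_body(
#       content='{"b": 1, "a": 2}',
#       content_type='application/json',
#       encoding='utf8')
#   # JSONProcessor re-serializes the body with sorted keys and indentation.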
| mit | 6,310,733,805,136,003,000 | 29.604563 | 79 | 0.567338 | false |
EttusResearch/gnuradio | gr-channels/python/channels/qa_channel_model.py | 47 | 1900 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks, channels
import math
class test_channel_model(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000(self):
N = 1000 # number of samples to use
fs = 1000 # baseband sampling rate
freq = 100
signal = analog.sig_source_c(fs, analog.GR_SIN_WAVE, freq, 1)
head = blocks.head(gr.sizeof_gr_complex, N)
op = channels.channel_model(0.0, 0.0, 1.0, [1,], 0)
snk = blocks.vector_sink_c()
snk1 = blocks.vector_sink_c()
op.set_noise_voltage(0.0)
op.set_frequency_offset(0.0)
op.set_taps([1,])
op.set_timing_offset(1.0)
self.tb.connect(signal, head, op, snk)
self.tb.connect(op, snk1)
self.tb.run()
dst_data = snk.data()
exp_data = snk1.data()
self.assertComplexTuplesAlmostEqual(exp_data, dst_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_channel_model, "test_channel_model.xml")
| gpl-3.0 | -632,478,174,958,866,300 | 31.20339 | 70 | 0.654211 | false |
wwright2/dcim3-angstrom1 | sources/openembedded-core/meta/lib/oeqa/utils/commands.py | 2 | 4475 | # Copyright (c) 2013-2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
import os
import sys
import signal
import subprocess
import threading
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, basestring):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
self.output = None
self.error = None
self.thread = None
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def commThread():
self.output, self.error = self.process.communicate(self.data)
self.thread = threading.Thread(target=commThread)
self.thread.start()
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
self.thread.join(self.timeout)
self.stop()
def stop(self):
if self.thread.isAlive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
self.thread.join(5)
if self.thread.isAlive():
self.process.kill()
self.thread.join()
self.output = self.output.rstrip()
self.status = self.process.poll()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
# bitbake -e output is really big
# and makes the log file useless
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
result = Result()
cmd = Command(command, timeout=timeout, **options)
cmd.run()
result.command = command
result.status = cmd.status
result.output = cmd.output
result.pid = cmd.process.pid
if result.status and not ignore_status:
if assert_error:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
else:
raise CommandError(result.status, command, result.output)
return result
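# Illustrative examples (added commentary, not part of the original module); the
# commands and the postconfig line are hypothetical and assume an initialized
# build environment:
#
#   result = runCmd('bitbake-layers show-layers', ignore_status=True)
#   if result.status == 0:
#       print(result.output)
#
#   bitbake('core-image-minimal', postconfig='IMAGE_FSTYPES = "ext4"')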
def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **options):
if postconfig:
postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
ftools.write_file(postconfig_file, postconfig)
extra_args = "-R %s" % postconfig_file
else:
extra_args = ""
if isinstance(command, basestring):
cmd = "bitbake " + extra_args + " " + command
else:
cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
try:
return runCmd(cmd, ignore_status, timeout, **options)
finally:
if postconfig:
os.remove(postconfig_file)
def get_bb_env(target=None, postconfig=None):
if target:
return bitbake("-e %s" % target, postconfig=postconfig).output
else:
return bitbake("-e", postconfig=postconfig).output
def get_bb_var(var, target=None, postconfig=None):
val = None
bbenv = get_bb_env(target, postconfig=postconfig)
for line in bbenv.splitlines():
if line.startswith(var + "="):
val = line.split('=')[1]
val = val.replace('\"','')
break
return val
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
| mit | 8,610,164,888,131,532,000 | 28.058442 | 128 | 0.610503 | false |
technic-tec/onedrive-d-old | onedrive_d/od_onedrive_api.py | 2 | 23966 | #!/usr/bin/python3
"""
OneDrive REST API for onedrive_d.
Refer to http://msdn.microsoft.com/en-us/library/dn659752.aspx
Notes:
* The API object can be called by any arbitrary thread in the program.
 * Calling get_instance() returns the API singleton object.
 * When there is a network issue at an API call, the calling thread is put to sleep
   and the thread manager will wake it up when the network seems fine. When the caller
   is woken up, it retries the function that failed before.
 * When refresh_token is set, the API will try to get a new access_token automatically
   and retry the function call later.
Bullets 3 and 4 are like interrupt handling.
"""
import os
import json
import urllib
import functools
import fcntl
# import imghdr
import requests
# for debugging
from time import sleep
from . import od_glob
from . import od_thread_manager
api_instance = None
def get_instance():
global api_instance
if api_instance is None:
api_instance = OneDriveAPI(od_glob.APP_CLIENT_ID, od_glob.APP_CLIENT_SECRET)
return api_instance
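# Illustrative usage sketch (added commentary, not part of the original module);
# auth_code and the file paths are hypothetical placeholders:
#
#   api = get_instance()
#   print(api.get_auth_uri())               # open in a browser and sign in
#   api.get_access_token(code=auth_code)    # exchange the OAuth code for tokens
#   quota = api.get_quota()                 # simple sanity check
#   entry = api.put('report.txt', local_path='/tmp/report.txt')
#   api.get(entry['id'], local_path='/tmp/report_copy.txt')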
class OneDriveAPIException(Exception):
def __init__(self, args=None):
super().__init__()
if args is None:
pass
elif 'error_description' in args:
self.errno = args['error']
self.message = args['error_description']
elif 'error' in args and 'code' in args['error']:
args = args['error']
self.errno = args['code']
self.message = args['message']
else:
self.errno = 0
self.message = ''
def __str__(self):
return self.message + ' (' + self.errno + ')'
class OneDriveAuthError(OneDriveAPIException):
"""
Raised when authentication fails.
"""
pass
class OneDriveServerInternalError(OneDriveAPIException):
pass
class OneDriveValueError(OneDriveAPIException):
"""
Raised when input to OneDriveAPI is invalid.
"""
pass
class OneDriveAPI:
CLIENT_SCOPE = ['wl.skydrive', 'wl.skydrive_update', 'wl.offline_access']
REDIRECT_URI = 'https://login.live.com/oauth20_desktop.srf'
OAUTH_AUTHORIZE_URI = 'https://login.live.com/oauth20_authorize.srf?'
OAUTH_TOKEN_URI = 'https://login.live.com/oauth20_token.srf'
OAUTH_SIGNOUT_URI = 'https://login.live.com/oauth20_logout.srf'
API_URI = 'https://apis.live.net/v5.0/'
FOLDER_TYPES = ['folder', 'album']
UNSUPPORTED_TYPES = ['notebook']
ROOT_ENTRY_ID = 'me/skydrive'
logger = od_glob.get_logger()
threadman = od_thread_manager.get_instance()
def __init__(self, client_id, client_secret, client_scope=CLIENT_SCOPE, redirect_uri=REDIRECT_URI):
self.client_access_token = None
self.client_refresh_token = None
self.client_id = client_id
self.client_secret = client_secret
self.client_scope = client_scope
self.client_redirect_uri = redirect_uri
self.http_client = requests.Session()
def parse_response(self, request, error, ok_status=requests.codes.ok):
ret = request.json()
if request.status_code != ok_status:
if 'code' in ret['error']:
if ret['error']['code'] == 'request_token_expired':
raise OneDriveAuthError(ret)
elif ret['error']['code'] == 'server_internal_error':
raise OneDriveServerInternalError(ret)
raise error(ret)
return ret
def auto_recover_auth_error(self):
"""
Note that this function still throws exceptions.
"""
if self.client_refresh_token is None:
raise OneDriveAuthError()
refreshed_token_set = self.refresh_token(self.client_refresh_token)
od_glob.get_config_instance().set_access_token(refreshed_token_set)
self.logger.info('auto refreshed API token in face of auth error.')
def get_auth_uri(self, display='touch', locale='en', state=''):
"""
Use the code returned in the final redirect URL to exchange for
an access token
http://msdn.microsoft.com/en-us/library/dn659750.aspx
"""
params = {
'client_id': self.client_id,
'scope': ' '.join(self.client_scope),
'response_type': 'code',
'redirect_uri': self.client_redirect_uri,
'display': display,
'locale': locale
}
if state != '':
params['state'] = state
return OneDriveAPI.OAUTH_AUTHORIZE_URI + urllib.parse.urlencode(params)
def is_signed_in(self):
		return self.client_access_token is not None
def set_user_id(self, id):
self.user_id = id
def set_access_token(self, token):
self.client_access_token = token
self.http_client.headers.update({'Authorization': 'Bearer ' + token})
def set_refresh_token(self, token):
self.client_refresh_token = token
def get_access_token(self, code=None, uri=None):
"""
http://msdn.microsoft.com/en-us/library/dn659750.aspx
return a dict with keys token_type, expires_in, scope,
access_token, refresh_token, authentication_token
"""
if uri is not None and '?' in uri:
qs_dict = urllib.parse.parse_qs(uri.split('?')[1])
if 'code' in qs_dict:
code = qs_dict['code']
if code is None:
raise OneDriveValueError(
{'error': 'access_code_not_found', 'error_description': 'The access code is not specified.'})
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"redirect_uri": self.client_redirect_uri,
"code": code,
"grant_type": "authorization_code"
}
try:
request = requests.post(
OneDriveAPI.OAUTH_TOKEN_URI, data=params, verify=False)
response = self.parse_response(request, OneDriveAPIException)
self.set_access_token(response['access_token'])
self.set_refresh_token(response['refresh_token'])
self.set_user_id(response['user_id'])
return response
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
return self.get_access_token(code, uri)
def refresh_token(self, token):
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"redirect_uri": self.client_redirect_uri,
"refresh_token": token,
"grant_type": 'refresh_token'
}
while True:
try:
request = requests.post(OneDriveAPI.OAUTH_TOKEN_URI, data=params)
response = self.parse_response(request, OneDriveAPIException)
self.set_access_token(response['access_token'])
self.set_refresh_token(response['refresh_token'])
self.set_user_id(response['user_id'])
return response
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def sign_out(self):
while True:
try:
r = self.http_client.get(OneDriveAPI.OAUTH_SIGNOUT_URI + '?client_id=' + self.client_id + '&redirect_uri=' + self.client_redirect_uri)
return self.parse_response(r, OneDriveAuthError)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_recent_docs(self):
raise NotImplementedError('get_recent_docs is not implemented.')
def get_quota(self, user_id='me'):
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + user_id + '/skydrive/quota')
return self.parse_response(r, OneDriveAPIException)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_root_entry_name(self):
return self.ROOT_ENTRY_ID
def get_property(self, entry_id='me/skydrive'):
try:
r = self.http_client.get(OneDriveAPI.API_URI + entry_id)
return self.parse_response(r, OneDriveAPIException)
except OneDriveAuthError:
self.auto_recover_auth_error()
return self.get_property(entry_id)
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
return self.get_property(entry_id)
def set_property(self, entry_id, **kwargs):
"""
Different types of files have different RW fields.
Refer to http://msdn.microsoft.com/en-us/library/dn631831.aspx.
Example:
self.set_property(your_id, name = 'new name', description = 'new desc')
"""
headers = {
'Content-Type': 'application/json',
}
while True:
try:
r = self.http_client.put(
OneDriveAPI.API_URI + entry_id, data=json.dumps(kwargs), headers=headers)
return self.parse_response(r, OneDriveAPIException)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_link(self, entry_id, type='r'):
"""
Return a link to share the entry.
@param type: one of 'r' (default), 'rw', 'e' (short for 'embed').
"""
if type == 'r':
type = 'shared_read_link'
elif type == 'rw':
type = 'shared_edit_link'
else:
type = 'embed'
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + entry_id + '/' + type)
return self.parse_response(r, OneDriveAPIException)['source']
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def list_entries(self, folder_id='me/skydrive', type='files'):
"""
@param type: 'files' (default) for all files. 'shared' for shared files (used internally).
"""
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + folder_id + '/' + type)
return self.parse_response(r, OneDriveAPIException)['data']
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def list_shared_entries(self, user_id='me'):
return self.list_entries(user_id + '/skydrive', 'shared')
def mkdir(self, folder_name, parent_id='me/skydrive'):
if parent_id == '/':
parent_id = 'me/skydrive' # fix parent_id alias
data = {'name': folder_name}
headers = {'Content-Type': 'application/json'}
uri = OneDriveAPI.API_URI + parent_id
while True:
try:
r = self.http_client.post(uri, data=json.dumps(data), headers=headers)
return self.parse_response(r, OneDriveAPIException, requests.codes.created)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def cp(self, target_id, dest_folder_id, overwrite=True, type='COPY'):
"""
		Return an entry dict if operation succeeds.
@param overwrite: whether or not to overwrite an existing entry. True, False, None (ChooseNewName).
"""
if overwrite is None:
overwrite = 'ChooseNewName'
data = {'destination': dest_folder_id}
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.client_access_token
}
uri = OneDriveAPI.API_URI + target_id + '?overwrite=' + str(overwrite)
req = requests.Request(
type, uri, data=json.dumps(data), headers=headers).prepare()
while True:
try:
r = self.http_client.send(req)
return self.parse_response(r, OneDriveAPIException, requests.codes.created)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def mv(self, target_id, dest_folder_id, overwrite=True):
return self.cp(target_id, dest_folder_id, overwrite, 'MOVE')
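	# Illustrative calls (added commentary, not part of the original module);
	# the entry and folder IDs below are hypothetical:
	#   api.cp('file.abc!105', 'folder.abc!200', overwrite=None)  # copy, rename on name clash
	#   api.mv('file.abc!105', 'folder.abc!200')                  # move, overwrite if present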
def bits_put(self, name, folder_id, local_path=None, block_size=1048576):
"""
Upload a large file with Microsoft BITS API.
A detailed document: https://gist.github.com/rgregg/37ba8929768a62131e85
Official document: https://msdn.microsoft.com/en-us/library/aa362821%28v=vs.85%29.aspx
@param name: remote file name
@param folder_id: the folder_id returned by Live API
@param local_path: the local path of the file to upload
@param remote_path (X): the remote path to put the file.
@return None if an unrecoverable error occurs; or a file property dict.
"""
# get file size
try:
source_size = os.path.getsize(local_path)
except:
self.logger.error("cannot get file size of \"" + local_path + "\"")
return None
# produce request url
if '!' in folder_id:
# subfolder
bits_folder_id = folder_id.split('.')[-1]
url = "https://cid-" + self.user_id + \
".users.storage.live.com/items/" + bits_folder_id + "/" + name
elif folder_id != '':
# root folder
user_id = folder_id.split('.')[-1]
url = "https://cid-" + user_id + \
".users.storage.live.com/users/0x" + user_id + "/LiveFolders/" + name
# elif remote_path is not None:
# url = "https://cid-" + user_id + ".users.storage.live.com/users/0x" + user_id + "/LiveFolders/" + remote_path
else:
self.logger.error("cannot request BITS. folder_id is invalid.")
return None
# force refresh access token to get largest expiration time
try:
self.auto_recover_auth_error()
except Exception as e:
self.logger.error(e)
return None
# BITS: Create-Session
headers = {
'X-Http-Method-Override': 'BITS_POST',
'Content-Length': 0,
'BITS-Packet-Type': 'Create-Session',
'BITS-Supported-Protocols': '{7df0354d-249b-430f-820d-3d2a9bef4931}'
}
self.logger.debug('getting session token for BITS upload...')
while True:
try:
response = self.http_client.request('post', url, headers=headers)
if response.status_code != 201:
if 'www-authenticate' in response.headers and 'invalid_token' in response.headers['www-authenticate']:
response.close()
raise OneDriveAuthError()
else:
# unknown error should be further analyzed
self.logger.debug("failed BITS Create-Session request to upload \"%s\". HTTP %d.", local_path, response.status_code)
self.logger.debug(response.headers)
response.close()
return None
session_id = response.headers['bits-session-id']
response.close()
break
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
del headers
# BITS: upload file by blocks
		# The authentication of this part relies on session_id, not access_token.
self.logger.debug('uploading file "%s".', local_path)
source_file = open(local_path, 'rb')
fcntl.lockf(source_file, fcntl.LOCK_SH)
source_cursor = 0
while source_cursor < source_size:
try:
target_cursor = min(source_cursor + block_size, source_size) - 1
source_file.seek(source_cursor)
data = source_file.read(target_cursor - source_cursor + 1)
self.logger.debug("uploading block %d - %d (total: %d B)", source_cursor, target_cursor, source_size)
response = self.http_client.request('post', url, data=data, headers={
'X-Http-Method-Override': 'BITS_POST',
'BITS-Packet-Type': 'Fragment',
'BITS-Session-Id': session_id,
'Content-Range': 'bytes {}-{}/{}'.format(source_cursor, target_cursor, source_size)
})
if response.status_code != requests.codes.ok:
# unknown error. better log it for future analysis
self.logger.debug('an error occurred uploading the block. HTTP %d.', response.status_code)
self.logger.debug(response.headers)
response.close()
fcntl.lockf(source_file, fcntl.LOCK_UN)
source_file.close()
# should I cancel session? https://msdn.microsoft.com/en-us/library/aa362829%28v=vs.85%29.aspx
return None
else:
source_cursor = int(response.headers['bits-received-content-range'])
response.close()
del data
# sleep(1)
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
del data
self.threadman.hang_caller()
fcntl.lockf(source_file, fcntl.LOCK_UN)
source_file.close()
# refresh token if expired
if od_glob.get_config_instance().is_token_expired():
try:
self.auto_recover_auth_error()
except Exception as e:
# this branch is horrible
self.logger.error(e)
return None
# BITS: close session
self.logger.debug('BITS upload completed. Closing session...')
headers = {
'X-Http-Method-Override': 'BITS_POST',
'BITS-Packet-Type': 'Close-Session',
'BITS-Session-Id': session_id,
'Content-Length': 0
}
while True:
try:
response = self.http_client.request('post', url, headers=headers)
if response.status_code != requests.codes.ok and response.status_code != requests.codes.created:
# when token expires, server return HTTP 500
# www-authenticate: 'Bearer realm="OneDriveAPI", error="expired_token", error_description="Auth token expired. Try refreshing."'
if 'www-authenticate' in response.headers and 'expired_token' in response.headers['www-authenticate']: # 'invalid_token' in response.headers['www-authenticate']:
response.close()
raise OneDriveAuthError()
else:
# however, when the token is changed,
# we will get HTTP 500 with 'x-clienterrorcode': 'UploadSessionNotFound'
self.logger.debug('An error occurred when closing BITS session. HTTP %d', response.status_code)
self.logger.debug(response.headers)
response.close()
return None
res_id = response.headers['x-resource-id']
response.close()
self.logger.debug('BITS session successfully closed.')
return self.get_property('file.' + res_id[:res_id.index('!')] + '.' + res_id)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def put(self, name, folder_id='me/skydrive', upload_location=None, local_path=None, data=None, overwrite=True):
"""
Upload the file or data to a path.
Returns a dict with keys 'source', 'name', and 'id'
@param name: the new name used for the uploaded FILE. Assuming the name is NTFS-compatible.
@param folder_id: the parent folder of the entry to upload. Default: root folder.
@param upload_location: OneDrive upload_location URL. If given, folder_id is ignored.
@param local_path: the local path of the FILE.
@param data: the data of the entry. If given, local_path is ignored.
@param overwrite: whether or not to overwrite existing files, if any.
To put an empty file, either local_path points to an empty file or data is set ''.
To upload a dir, check if it exists, and then send recursive puts to upload its files.
Another issue is timestamp correction.
"""
uri = OneDriveAPI.API_URI
if upload_location is not None:
uri += upload_location # already ends with '/'
else:
uri += folder_id + '/files/'
if name == '':
raise OneDriveValueError(
{'error': 'empty_name', 'error_description': 'The file name cannot be empty.'})
uri += name
d = {
'downsize_photo_uploads': False,
'overwrite': overwrite
}
uri += '?' + urllib.parse.urlencode(d)
if data is not None:
pass
elif local_path is not None:
if not os.path.isfile(local_path):
raise OneDriveValueError(
{'error': 'wrong_file_type', 'error_description': 'The local path "' + local_path + '" is not a file.'})
else:
data = open(local_path, 'rb')
else:
raise OneDriveValueError(
{'error': 'upload_null_content', 'error_description': 'local_path and data cannot both be null.'})
while True:
try:
r = self.http_client.put(uri, data=data)
ret = r.json()
if r.status_code != requests.codes.ok and r.status_code != requests.codes.created:
# TODO: try testing this
if 'error' in ret and 'code' in ret['error'] and ret['error']['code'] == 'request_token_expired':
raise OneDriveAuthError(ret)
else:
raise OneDriveAPIException(ret)
return self.get_property(ret['id'])
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def get_by_blocks(self, entry_id, local_path, file_size, block_size):
try:
f = open(local_path, 'wb')
except OSError as e:
self.logger.error(e)
return False
self.logger.debug('download file to "' + local_path + '"...')
# fcntl.lockf(f, fcntl.LOCK_SH)
cursor = 0
while cursor < file_size:
self.logger.debug('current cursor: ' + str(cursor))
try:
target = min(cursor + block_size, file_size) - 1
r = self.http_client.get(OneDriveAPI.API_URI + entry_id + '/content',
headers={
'Range': 'bytes={0}-{1}'.format(cursor, target)
})
if r.status_code == requests.codes.ok or r.status_code == requests.codes.partial:
# sample data: 'bytes 12582912-12927920/12927921'
range_unit, range_str = r.headers['content-range'].split(' ')
range_range, range_total = range_str.split('/')
range_from, range_to = range_range.split('-')
f.write(r.content)
cursor = int(range_to) + 1
r.close()
else:
if 'www-authenticate' in r.headers and 'invalid_token' in r.headers['www-authenticate']:
raise OneDriveAuthError()
else:
self.logger.debug('failed downloading block. HTTP %d.', r.status_code)
self.logger.debug(r.headers)
self.logger.debug(r.content)
return False
# return False
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
f.close()
self.logger.debug('file saved.')
# fcntl.lockf(f, fcntl.LOCK_UN)
return True
def get(self, entry_id, local_path=None):
"""
Fetching content of OneNote files will raise OneDriveAPIException:
Resource type 'notebook' doesn't support the path 'content'. (request_url_invalid)
"""
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + entry_id + '/content')
if r.status_code != requests.codes.ok:
ret = r.json()
# TODO: try testing this
if 'error' in ret and 'code' in ret['error'] and ret['error']['code'] == 'request_token_expired':
raise OneDriveAuthError(ret)
else:
raise OneDriveAPIException(ret)
if local_path is not None:
with open(local_path, 'wb') as f:
f.write(r.content)
return True
else:
return r.content
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def rm(self, entry_id):
"""
OneDrive API always returns HTTP 204.
"""
while True:
try:
self.http_client.delete(OneDriveAPI.API_URI + entry_id)
return
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def get_user_info(self, user_id='me'):
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + user_id)
return self.parse_response(r, OneDriveAPIException, requests.codes.ok)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_contact_list(self, user_id='me'):
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + user_id + '/friends')
return self.parse_response(r, OneDriveAPIException, requests.codes.ok)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
| lgpl-3.0 | -6,850,790,834,364,889,000 | 33.286123 | 167 | 0.690812 | false |