repo_name (string, 5–100) | path (string, 4–299) | copies (class, 990 values) | size (string, 4–7) | content (string, 666–1.03M) | license (class, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
KAsante95/osf.io | framework/auth/utils.py | 32 | 2156 | import re
from nameparser.parser import HumanName
from modularodm.exceptions import ValidationError
# email verification adopted from django. For licence information, see NOTICE
USER_REGEX = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$" # dot-atom
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]'
r'|\\[\001-\011\013\014\016-\177])*"$)', re.IGNORECASE)
DOMAIN_REGEX = re.compile(
# domain
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}|[A-Z0-9-]{2,})$'
# literal form, ipv4 address (SMTP 4.1.3)
r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE)
def validate_email(email):
if len(email) > 254:
raise ValidationError("Invalid Email")
if not email or '@' not in email:
raise ValidationError("Invalid Email")
user_part, domain_part = email.rsplit('@', 1)
if not USER_REGEX.match(user_part):
raise ValidationError("Invalid Email")
if not DOMAIN_REGEX.match(domain_part):
try:
domain_part = domain_part.encode('idna').decode('ascii')
except UnicodeError:
pass
else:
if DOMAIN_REGEX.match(domain_part):
return True
raise ValidationError("Invalid Email")
return True
def impute_names(name):
human = HumanName(name)
return {
'given': human.first,
'middle': human.middle,
'family': human.last,
'suffix': human.suffix,
}
def impute_names_model(name):
human = HumanName(name)
return {
'given_name': human.first,
'middle_names': human.middle,
'family_name': human.last,
'suffix': human.suffix,
}
def privacy_info_handle(info, anonymous, name=False):
"""hide user info from api if anonymous
:param str info: info which suppose to return
:param bool anonymous: anonymous or not
:param bool name: if the info is a name,
:return str: the handled info should be passed through api
"""
if anonymous:
return 'A user' if name else ''
return info
| apache-2.0 | -3,784,843,651,314,846,700 | 27 | 83 | 0.579314 | false |
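A brief usage sketch of the helpers in the file above. It assumes `framework.auth.utils` and its dependencies (`nameparser`, `modularodm`) are importable; the sample address and name are invented.

```python
# Hypothetical usage; assumes framework.auth.utils and its dependencies
# (nameparser, modularodm) are importable. Sample values are invented.
from framework.auth.utils import validate_email, impute_names_model

try:
    validate_email("ada@example.org")      # returns True when the address passes
    print("address accepted")
except Exception as error:                 # raises ValidationError("Invalid Email") otherwise
    print("rejected: %s" % error)

parts = impute_names_model("Ada Augusta King")
print("given: %s, family: %s" % (parts["given_name"], parts["family_name"]))
```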
tacaswell/bokeh | bokeh/models/ranges.py | 10 | 3886 | """ Models for describing different kinds of ranges of values
in different kinds of spaces (e.g., continuous or categorical)
and with options for "auto sizing".
"""
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import abstract
from ..properties import Int, Float, String, Datetime, Instance, List, Either
from .callbacks import Callback
from .renderers import Renderer
@abstract
class Range(PlotObject):
""" A base class for all range types. ``Range`` is not generally
useful to instantiate on its own.
"""
callback = Instance(Callback, help="""
A callback to run in the browser whenever the range is updated.
""")
class Range1d(Range):
""" A fixed, closed range [start, end] in a continuous scalar
dimension.
In addition to supplying ``start`` and ``end`` keyword arguments
to the ``Range1d`` initializer, you can also instantiate with
the convenience syntax::
Range(0, 10) # equivalent to Range(start=0, end=10)
"""
start = Either(Float, Datetime, Int, help="""
The start of the range.
""")
end = Either(Float, Datetime, Int, help="""
The end of the range.
""")
def __init__(self, *args, **kwargs):
if args and ('start' in kwargs or 'end' in kwargs):
raise ValueError("'start' and 'end' keywords cannot be used with positional arguments")
elif args and len(args) != 2:
raise ValueError('Only Range1d(start, end) acceptable when using positional arguments')
elif args:
kwargs['start'] = args[0]
kwargs['end'] = args[1]
super(Range1d, self).__init__(**kwargs)
@abstract
class DataRange(Range):
""" A base class for all data range types. ``DataRange`` is not
generally useful to instantiate on its own.
"""
names = List(String, help="""
A list of names to query for. If set, only renderers that
have a matching value for their ``name`` attribute will be used
for autoranging.
""")
renderers = List(Instance(Renderer), help="""
An explicit list of renderers to autorange against. If unset,
defaults to all renderers on a plot.
""")
class DataRange1d(DataRange):
""" An auto-fitting range in a continuous scalar dimension.
"""
range_padding = Float(0.1, help="""
A percentage of the total range size to add as padding to
the range start and end.
""")
start = Float(help="""
An explicitly supplied range start. If provided, will override
automatically computed start value.
""")
end = Float(help="""
An explicitly supplied range end. If provided, will override
automatically computed end value.
""")
class FactorRange(Range):
""" A range in a categorical dimension.
In addition to supplying ``factors`` keyword argument to the
``FactorRange`` initializer, you can also instantiate with
the convenience syntax::
FactorRange("foo", "bar") # equivalent to FactorRange(factors=["foo", "bar"])
.. note::
``FactorRange`` may be renamed to ``CategoricalRange`` in
the future.
"""
offset = Float(0, help="""
An offset to the (synthetic) range (default: 0)
.. note::
The primary usage of this is to support compatibility and integration
with other plotting systems, and will not generally of interest to
most users.
""")
factors = Either(List(String), List(Int), help="""
A list of string or integer factors (categories) to comprise
this categorical range.
""")
def __init__(self, *args, **kwargs):
if args and "factors" in kwargs:
raise ValueError("'factors' keyword cannot be used with positional arguments")
elif args:
kwargs['factors'] = list(args)
super(FactorRange, self).__init__(**kwargs)
| bsd-3-clause | -4,578,640,048,189,054,500 | 29.598425 | 99 | 0.64771 | false |
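A short construction sketch for the range models defined above; it assumes the bokeh package that provides `bokeh.models.ranges` is installed.

```python
# Hypothetical usage; assumes the bokeh package defining these models is installed.
from bokeh.models.ranges import Range1d, FactorRange

xr = Range1d(0, 10)                 # positional convenience form noted in the docstring
yr = Range1d(start=-5.0, end=5.0)   # explicit keyword form
cats = FactorRange(factors=["foo", "bar", "baz"])

print("x range: %s to %s" % (xr.start, xr.end))
print("factors: %s" % cats.factors)
```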
CCLab/Raw-Salad | scripts/autoupload/schema_modifier.py | 1 | 9770 | # -*- coding: utf-8 -*-
'''
Created on 30-08-2011
'''
import simplejson as json
from django.template.defaultfilters import slugify
from string import ascii_lowercase
def remove_diacritics(name):
"""Returns name without diacritics and inserted latin letters.
Arguments:
name -- word to have diacritics removed
"""
return name.replace(u'ą', 'a').replace(u'ć', 'c').replace(u'ę', 'e').replace(u'ł', 'l')\
.replace(u'ń', 'n').replace(u'ó', 'o').replace(u'ś', 's').replace(u'ź', 'z')\
.replace(u'ż', 'z').replace(u'Ą', 'A').replace(u'Ć', 'C').replace(u'Ę', 'E')\
.replace(u'Ł', 'L').replace(u'Ń', 'N').replace(u'Ó', 'O').replace(u'Ś', 'S')\
.replace(u'Ź', 'Z').replace(u'Ż', 'Z')
"""
Description of input and output schemas used by SchemaModifier
is in the end of this file.
"""
class SchemaModifier:
"""Class used to create schema files that will be used to upload data
to db. Bases on schema files describing data fields and hierarchy.
"""
def __init__(self, json_schema, json_hierarchy):
"""Initiates object.
Arguments:
json_schema -- dict describing fields of collection
json_hierarchy -- dict describing hierarchy in collection
"""
self.schema = json_schema
self.hierarchy = json_hierarchy
self.out_schema = None
self.out_coll = None
def modify_schema(self, add_id=False):
"""Creates schema describing names of fields and their types.
Modified schema will not contain description of hierarchy columns
and type column. One hierarchy column will be inserted and if add_id
is True, then also id column will be described in schema.
Arguments:
add_id -- specifies if id column is generated and therefore should
be described
"""
fields = self.schema["fields"][:]
self.out_schema = {}
alias = self.out_schema["alias"] = {}
type = self.out_schema["type"] = {}
self.remove_hierarchy_fields(fields)
field_nr = 0
if add_id:
alias[str(field_nr)] = "idef"
type["idef"] = "string"
field_nr += 1
alias[str(field_nr)] = "type"
type["type"] = "string"
field_nr += 1
alias[str(field_nr)] = "name"
type["type"] = "string"
field_nr += 1
for field in fields:
#name = slugify(field["label"])
name = slugify(remove_diacritics(field["label"]))
#base = slugify(field["label"])
base = slugify(remove_diacritics(field["label"]))
i = 1
while name in type:
i += 1
name = base + str(i)
alias[str(field_nr)] = name
type[name] = field["type"]
field_nr += 1
def modify_coll_descr(self, params_dict, add_id=False):
"""Creates schema describing collection.
Arguments:
params_dict -- dict with parameters needed to create description of
collection
add_id -- specifies if id column is generated and therefore should
be described
"""
explorable = ''
if add_id:
explorable = self.out_schema['alias']['1']
else:
explorable = self.out_schema['alias']['0']
perspective = self.schema['dataset_name'] + ' ' + self.schema['perspective_name'] +\
' ' + self.schema['issue']
max_level = len(self.hierarchy['columns'])
self.out_coll = {
"name": slugify(remove_diacritics(perspective)),
"perspective": perspective,
"ns": params_dict['ns'],
"dataset": params_dict['dataset'],
"idef": params_dict['perspective'],
"issue": self.schema['issue'],
"explorable": explorable,
"max_level": ascii_lowercase[max_level],
"batchsize": None,
"columns": self.prepare_columns_descr(add_id, explorable),
"aux": {"leaf": 1, "parent": 1, "idef": 1},
"query": {},
"sort": {
"0": {"idef_sort": 1},
"1": {"parent_sort": 1},
"2": {"level": 1}
}
}
def save(self, schema_name, coll_descr_name):
"""Saves schemas in files and closes those files.
Arguments:
schema_name -- name of a file that will have modified schema describing
names of fields and their types
coll_descr_name -- name of a file that will have collection description
"""
json_schema = json.dumps(self.out_schema, encoding='utf-8', sort_keys=True, indent=4)
json_coll_descr = json.dumps(self.out_coll, encoding='utf-8', sort_keys=True, indent=4)
schema_file = open(schema_name, 'wb')
schema_file.write(json_schema)
schema_file.close()
coll_descr_file = open(coll_descr_name, 'wb')
coll_descr_file.write(json_coll_descr)
coll_descr_file.close()
def get_new_schema(self):
"""Returns modified schema describing fields."""
return self.out_schema
def get_coll_descr(self):
"""Returns modified schema describing collection."""
return self.out_coll
def remove_hierarchy_fields(self, fields):
"""Removes hierarchy fields from list of fields.
Arguments:
fields -- list of fields before inserting hierarchy
"""
to_remove = sorted(self.hierarchy['columns'] + [self.hierarchy['name_column']], reverse=True)
for nr in to_remove:
del fields[nr]
def prepare_columns_descr(self, add_id, explorable_name):
"""Returns list describing columns in collection.
Arguments:
add_id -- specifies if id was generated
explorable_name -- name of field that represents explorable column
(hierarchy column)
"""
columns = []
# those columns should be generated for all collections
columns.append(self.create_column("idef_sort", "ID", "string", basic=False))
columns.append(self.create_column("parent_sort", "Rodzic", "string", basic=False))
columns.append(self.create_column("level", "Poziom", "string", basic=False))
fields = self.schema["fields"][:]
self.remove_hierarchy_fields(fields)
columns.append(self.create_column("type", self.hierarchy['field_type_label'], "string", basic=True))
columns.append(self.create_column("name", self.hierarchy['field_name_label'], "string", basic=True))
previous_names = []
for field in fields:
key = slugify(remove_diacritics(field["label"]))
key_base = slugify(remove_diacritics(field["label"]))
i = 1
while key in previous_names:
i += 1
key = key_base + str(i)
previous_names.append(key)
label = field['label']
type = field['type']
basic = 'basic' in field and field['basic']
column = self.create_column(key, label, type, basic)
columns.append(column)
return columns
def create_column(self, key, label, type, basic=True):
"""Creates object describing column, adds processable key = True.
Arguments:
key -- key value(internal representation of column)
label -- label(name of column in header)
type -- type of value("string", "int" or "float")
basic -- bool value
"""
column = {
"key": key,
"label": label,
"type": type,
"processable": True
}
if type in ["int", "float"]:
column["checkable"] = True
if basic:
column["basic"] = True
return column
"""
Expected form of schema describing data fields:
{
"fields": [
{
"name":
"type":
"label":
(optional)"basic":
}
]
}
Expected form of hierarchy:
{
"rows": [list of hierarchy columns]
"name_column": number of column which value represents name of data row
and will be moved to new field(its name is field_name_label)
"field_type_label": name of column(will be inserted) that represents
type of row
"field_name_label": name of column(will be inserted) that represents
name of row
}
SchemaModifier creates modified schema and collection description.
Form of modified schema:
{
"alias": {
"0": field_0_name,
...
},
"type": {
"field_0_name": field_0_type("string", "int" or "float"),
...
}
}
Form of collection description:
{
"aux":
"batchsize":
"columns": [
{
"key": string,
"label": string,
"type": "string", "int" or "float",
"processable": bool,
(optional)"checkable": bool
},
...
],
"dataset": int,
"explorable": string,
"idef": int,
"issue": string,
"max_level": string,
"name": string,
"ns": string,
"perspective": string,
"query" dict,
"sort": dict
}
""" | bsd-3-clause | -1,703,946,927,037,977,600 | 33.701068 | 108 | 0.529641 | false |
idan/oauthlib | oauthlib/uri_validate.py | 1 | 7570 | """
Regex for URIs
These regex are directly derived from the collected ABNF in RFC3986
(except for DIGIT, ALPHA and HEXDIG, defined by RFC2234).
They should be processed with re.VERBOSE.
Thanks Mark Nottingham for this code - https://gist.github.com/138549
"""
import re
# basics
DIGIT = r"[\x30-\x39]"
ALPHA = r"[\x41-\x5A\x61-\x7A]"
HEXDIG = r"[\x30-\x39A-Fa-f]"
# pct-encoded = "%" HEXDIG HEXDIG
pct_encoded = r" %% %(HEXDIG)s %(HEXDIG)s" % locals()
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
unreserved = r"(?: %(ALPHA)s | %(DIGIT)s | \- | \. | _ | ~ )" % locals()
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
gen_delims = r"(?: : | / | \? | \# | \[ | \] | @ )"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
sub_delims = r"""(?: ! | \$ | & | ' | \( | \) |
\* | \+ | , | ; | = )"""
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
pchar = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | : | @ )" % locals(
)
# reserved = gen-delims / sub-delims
reserved = r"(?: %(gen_delims)s | %(sub_delims)s )" % locals()
# scheme
# scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
scheme = r"%(ALPHA)s (?: %(ALPHA)s | %(DIGIT)s | \+ | \- | \. )*" % locals()
# authority
# dec-octet = DIGIT ; 0-9
# / %x31-39 DIGIT ; 10-99
# / "1" 2DIGIT ; 100-199
# / "2" %x30-34 DIGIT ; 200-249
# / "25" %x30-35 ; 250-255
dec_octet = r"""(?: %(DIGIT)s |
[\x31-\x39] %(DIGIT)s |
1 %(DIGIT)s{2} |
2 [\x30-\x34] %(DIGIT)s |
25 [\x30-\x35]
)
""" % locals()
# IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
IPv4address = r"%(dec_octet)s \. %(dec_octet)s \. %(dec_octet)s \. %(dec_octet)s" % locals(
)
# h16 = 1*4HEXDIG
h16 = r"(?: %(HEXDIG)s ){1,4}" % locals()
# ls32 = ( h16 ":" h16 ) / IPv4address
ls32 = r"(?: (?: %(h16)s : %(h16)s ) | %(IPv4address)s )" % locals()
# IPv6address = 6( h16 ":" ) ls32
# / "::" 5( h16 ":" ) ls32
# / [ h16 ] "::" 4( h16 ":" ) ls32
# / [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
# / [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
# / [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
# / [ *4( h16 ":" ) h16 ] "::" ls32
# / [ *5( h16 ":" ) h16 ] "::" h16
# / [ *6( h16 ":" ) h16 ] "::"
IPv6address = r"""(?: (?: %(h16)s : ){6} %(ls32)s |
:: (?: %(h16)s : ){5} %(ls32)s |
%(h16)s :: (?: %(h16)s : ){4} %(ls32)s |
(?: %(h16)s : ) %(h16)s :: (?: %(h16)s : ){3} %(ls32)s |
(?: %(h16)s : ){2} %(h16)s :: (?: %(h16)s : ){2} %(ls32)s |
(?: %(h16)s : ){3} %(h16)s :: %(h16)s : %(ls32)s |
(?: %(h16)s : ){4} %(h16)s :: %(ls32)s |
(?: %(h16)s : ){5} %(h16)s :: %(h16)s |
(?: %(h16)s : ){6} %(h16)s ::
)
""" % locals()
# IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
IPvFuture = r"v %(HEXDIG)s+ \. (?: %(unreserved)s | %(sub_delims)s | : )+" % locals()
# IP-literal = "[" ( IPv6address / IPvFuture ) "]"
IP_literal = r"\[ (?: %(IPv6address)s | %(IPvFuture)s ) \]" % locals()
# reg-name = *( unreserved / pct-encoded / sub-delims )
reg_name = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s )*" % locals()
# userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
userinfo = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | : )" % locals(
)
# host = IP-literal / IPv4address / reg-name
host = r"(?: %(IP_literal)s | %(IPv4address)s | %(reg_name)s )" % locals()
# port = *DIGIT
port = r"(?: %(DIGIT)s )*" % locals()
# authority = [ userinfo "@" ] host [ ":" port ]
authority = r"(?: %(userinfo)s @)? %(host)s (?: : %(port)s)?" % locals()
# Path
# segment = *pchar
segment = r"%(pchar)s*" % locals()
# segment-nz = 1*pchar
segment_nz = r"%(pchar)s+" % locals()
# segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" )
# ; non-zero-length segment without any colon ":"
segment_nz_nc = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | @ )+" % locals()
# path-abempty = *( "/" segment )
path_abempty = r"(?: / %(segment)s )*" % locals()
# path-absolute = "/" [ segment-nz *( "/" segment ) ]
path_absolute = r"/ (?: %(segment_nz)s (?: / %(segment)s )* )?" % locals()
# path-noscheme = segment-nz-nc *( "/" segment )
path_noscheme = r"%(segment_nz_nc)s (?: / %(segment)s )*" % locals()
# path-rootless = segment-nz *( "/" segment )
path_rootless = r"%(segment_nz)s (?: / %(segment)s )*" % locals()
# path-empty = 0<pchar>
path_empty = r"" # FIXME
# path = path-abempty ; begins with "/" or is empty
# / path-absolute ; begins with "/" but not "//"
# / path-noscheme ; begins with a non-colon segment
# / path-rootless ; begins with a segment
# / path-empty ; zero characters
path = r"""(?: %(path_abempty)s |
%(path_absolute)s |
%(path_noscheme)s |
%(path_rootless)s |
%(path_empty)s
)
""" % locals()
### Query and Fragment
# query = *( pchar / "/" / "?" )
query = r"(?: %(pchar)s | / | \? )*" % locals()
# fragment = *( pchar / "/" / "?" )
fragment = r"(?: %(pchar)s | / | \? )*" % locals()
# URIs
# hier-part = "//" authority path-abempty
# / path-absolute
# / path-rootless
# / path-empty
hier_part = r"""(?: (?: // %(authority)s %(path_abempty)s ) |
%(path_absolute)s |
%(path_rootless)s |
%(path_empty)s
)
""" % locals()
# relative-part = "//" authority path-abempty
# / path-absolute
# / path-noscheme
# / path-empty
relative_part = r"""(?: (?: // %(authority)s %(path_abempty)s ) |
%(path_absolute)s |
%(path_noscheme)s |
%(path_empty)s
)
""" % locals()
# relative-ref = relative-part [ "?" query ] [ "#" fragment ]
relative_ref = r"%(relative_part)s (?: \? %(query)s)? (?: \# %(fragment)s)?" % locals(
)
# URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
URI = r"^(?: %(scheme)s : %(hier_part)s (?: \? %(query)s )? (?: \# %(fragment)s )? )$" % locals(
)
# URI-reference = URI / relative-ref
URI_reference = r"^(?: %(URI)s | %(relative_ref)s )$" % locals()
# absolute-URI = scheme ":" hier-part [ "?" query ]
absolute_URI = r"^(?: %(scheme)s : %(hier_part)s (?: \? %(query)s )? )$" % locals(
)
def is_uri(uri):
return re.match(URI, uri, re.VERBOSE)
def is_uri_reference(uri):
return re.match(URI_reference, uri, re.VERBOSE)
def is_absolute_uri(uri):
return re.match(absolute_URI, uri, re.VERBOSE)
| bsd-3-clause | 1,597,236,451,372,848,600 | 34.373832 | 96 | 0.414531 | false |
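A quick check sketch for the validators defined above; it assumes `oauthlib.uri_validate` is importable.

```python
# Hypothetical usage; assumes oauthlib.uri_validate is importable.
from oauthlib.uri_validate import is_uri, is_absolute_uri

print(bool(is_absolute_uri("https://client.example.com/cb?state=xyz")))  # expected: True
print(bool(is_uri("no scheme here")))                                     # expected: False
```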
hmcmooc/muddx-platform | common/djangoapps/terrain/steps.py | 3 | 6853 | # pylint: disable=C0111
# pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=W0613
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=E0611
from xmodule.modulestore.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
world.register_by_course_key(course_key, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
| agpl-3.0 | -5,741,739,564,179,757,000 | 28.925764 | 115 | 0.681016 | false |
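A sketch of one additional step written in the same style as the module above; it assumes the lettuce harness and the `world` helpers used throughout the file are available.

```python
# Hypothetical extra step; follows the same @step pattern as the module above.
from lettuce import world, step

@step(u'I should see the page title "([^"]*)"$')
def should_see_page_title(step, title):
    assert title in world.browser.title
```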
2014c2g12/c2g12 | c2wp/wsgi/programs/c2g30/__init__.py | 10 | 13378 | import cherrypy
# This is the definition of the C2G30 class
class C2G30(object):
# Each group uses index to direct the execution of the programs that follow
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
這是 2014C2 協同專案下的 c2g30 分組程式開發網頁, 以下為 W12 的任務執行內容.<br />
<!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
<a href="fillpoly">c2g30 fillpoly 繪圖</a><br />
<a href="drawline">c2g30 drawline 繪圖</a><br />
<a href="animate1">c2g30 animate1 繪圖</a><br />
<a href="flag">c2g30 flag 繪圖</a><br />
<a href="square">c2g30 square 繪圖</a><br />
<a href="star">c2g30 star 繪圖</a><br />
'''
return outstring
# The following are the CherryPy methods created by the c2g30 group; fillpoly here uses Brython for in-browser drawing
'''
If the following setup is used:
import programs.c2g30 as c2g30
root.c2g30 = c2g30.C2G30()
then, after the program starts, the function can be invoked via /c2g30/fillpoly
'''
@cherrypy.expose
def fillpoly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入數學模組的所有方法
from math import *
# 導入時間模組
import time
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 定義座標轉換(0, 0) 到 (75, 20)
def change_ref_system(x, y):
return (20 + x * 8, 420 - y * 20)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
def fill():
ctx.beginPath()
ctx.moveTo(75,50)
ctx.lineTo(100,75)
ctx.lineTo(100,25)
ctx.fill()
def star():
ctx.beginPath()
ctx.moveTo(0,50)
ctx.lineTo(11,16)
ctx.lineTo(48,16)
ctx.fill()
ctx.fillStyle = "blue"
fill()
star()
x1, y1 = change_ref_system(0, 0)
for 索引 in range(0, 70, 4):
x2, y2 = change_ref_system(索引, 20)
draw_line(x1, y1, x2, y2, linethick=3, color="blue")
x1, y1 = change_ref_system(70, 0)
for 索引 in range(0, 70, 4):
x2, y2 = change_ref_system(索引, 20)
draw_line(x1, y1, x2, y2, linethick=3, color="red")
</script>
</body>
</html>
'''
return outstring
'''
If the following setup is used:
import programs.c2g30 as c2g30
root.c2g30 = c2g30.C2G30()
then, after the program starts, the function can be invoked via /c2g30/drawline
context.setTransform(a,b,c,d,e,f)
a scales the object along the X axis
b skews the object horizontally (i.e. horizontal shear)
c skews the object vertically (i.e. vertical shear)
d scales the object along the Y axis
e translates the object along the X axis
f translates the object along the Y axis
a c e
b d f
0 0 1
'''
@cherrypy.expose
def drawline(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
draw_line(0, 0, 100, 100)
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def animate1(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import browser.timer
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 設定 y 的起始值
y = 0
# 設定增量變數 inc 為 1
inc = 1
# 將畫筆設為紅色
ctx.strokeStyle = "rgb(255, 0, 0)"
# 定義畫水平線函式
def draw():
# 將 y 與 inc 設為全域變數
global y, inc
# 畫新圖之前, 先清除畫面
ctx.clearRect(0, 0, 800, 800)
# 開始畫水平線
ctx.beginPath()
ctx.moveTo(0, y)
ctx.lineTo(800-1, y)
ctx.stroke()
# y 的值增量
y = y + inc
# 若 y 碰到兩個極端值, 則改變增量方向
if y == 0 or y == 800-1:
inc = inc*-1
# ev 為事件輸入, 與隨後的 bind 方法配合
def start(ev):
# interval 為全域變數, 因為 stop 函式需要
global interval
# 每 10 個 micro second 呼叫一次 draw
interval = browser.timer.set_interval(draw,10)
def stop(ev):
global interval
browser.timer.clear_interval(interval)
# 將 id 為 start 的按鈕與 start 函式利用 click 事件加以連結
doc['start'].bind('click', start)
# 將 id 為 stop 的按鈕與 stop 函式利用 click 事件加以連結
doc['stop'].bind('click', stop)
</script>
<!-- 這裡建立 start 與 stop 按鈕-->
<button id="start">start</button>
<button id="stop">stop</button>
</body>
</html>
'''
return outstring
@cherrypy.expose
def flag(self, *args, **kwargs):
'''
Original program source: http://blog.roodo.com/esabear/archives/19215194.html
Rewritten as a Brython program
'''
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="300" height="200"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import math
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 canvas.height 單位光點
# ctx.setTransform(1, 0, 0, -1, 0, canvas.height)
# 以下採用 canvas 原始座標繪圖
flag_w = canvas.width
flag_h = canvas.height
circle_x = flag_w/4
circle_y = flag_h/4
# 先畫滿地紅
ctx.fillStyle='rgb(255, 0, 0)'
ctx.fillRect(0,0,flag_w,flag_h)
# 再畫青天
ctx.fillStyle='rgb(0, 0, 150)'
ctx.fillRect(0,0,flag_w/2,flag_h/2)
# 畫十二到光芒白日
ctx.beginPath()
star_radius = flag_w/8
angle = 0
for i in range(24):
angle += 5*math.pi*2/12
toX = circle_x + math.cos(angle)*star_radius
toY = circle_y + math.sin(angle)*star_radius
# 只有 i 為 0 時移動到 toX, toY, 其餘都進行 lineTo
if (i):
ctx.lineTo(toX, toY)
else:
ctx.moveTo(toX, toY)
# 將填色設為白色
ctx.fillStyle = '#fff'
ctx.fill()
# 白日:藍圈
ctx.beginPath()
# 查詢 canvas arc 如何定義
ctx.arc(circle_x, circle_y, flag_w*17/240, 0, math.pi*2, true)
ctx.closePath()
# 填色設為藍色
ctx.fillStyle = 'rgb(0, 0, 149)'
ctx.fill()
# 白日:白心
ctx.beginPath()
ctx.arc(circle_x, circle_y, flag_w/16, 0, math.pi*2, true)
ctx.closePath()
# 填色設為白色
ctx.fillStyle = '#fff'
ctx.fill()
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def square(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
def square(x, y, width, color="black"):
half = width/2
draw_line(x+half, y+half, x+half, y-half)
draw_line(x+half, y-half, x-half, y-half, color="red")
draw_line(x-half, y-half, x-half, y+half)
draw_line(x-half, y+half, x+half, y+half)
for i in range(5):
square(400, 400, 200+50*i)
square(400+i*50, 400-i*50, 200)
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def star(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import math
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
ctx.beginPath()
ctx.lineWidth = linethick
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = color
ctx.stroke()
# x, y 為中心, r 為半徑, angle 旋轉角, solid 空心或實心, color 顏色
def star(x, y, r, angle=0, solid=False, color="#f00"):
# 以 x, y 為圓心, 計算五個外點
deg = math.pi/180
# 圓心到水平線距離
a = r*math.cos(72*deg)
# a 頂點向右到內點距離
b = (r*math.cos(72*deg)/math.cos(36*deg))*math.sin(36*deg)
# 利用畢氏定理求內點半徑
rin = math.sqrt(a**2 + b**2)
# 查驗 a, b 與 rin
#print(a, b, rin)
if(solid):
ctx.beginPath()
for i in range(5):
xout = (x + r*math.sin((360/5)*deg*i+angle*deg))
yout = (y + r*math.cos((360/5)*deg*i+angle*deg))
# 外點增量 + 1
xout2 = x + r*math.sin((360/5)*deg*(i+1)+angle*deg)
yout2 = y + r*math.cos((360/5)*deg*(i+1)+angle*deg)
xin = x + rin*math.sin((360/5)*deg*i+36*deg+angle*deg)
yin = y + rin*math.cos((360/5)*deg*i+36*deg+angle*deg)
# 查驗外點與內點座標
#print(xout, yout, xin, yin)
if(solid):
# 填色
if(i==0):
ctx.moveTo(xout, yout)
ctx.lineTo(xin, yin)
ctx.lineTo(xout2, yout2)
else:
ctx.lineTo(xin, yin)
ctx.lineTo(xout2, yout2)
else:
# 空心
draw_line(xout, yout, xin, yin, color)
# 畫空心五芒星, 無關畫線次序, 若實心則與畫線次序有關
draw_line(xout2, yout2, xin, yin, color)
if(solid):
ctx.fillStyle = color
ctx.fill()
star(600, 600, 100, 30, True, "#00f")
star(100, 100, 30, 0, True, "#f00")
#star(300, 300, 50, 0, False, "#000")
for i in range(5):
for j in range(5):
star(200+65*i, 200+65*j, 30, 0, False, "#000")
</script>
</body>
</html>
'''
return outstring
| gpl-2.0 | 1,659,936,041,346,532,900 | 27.424528 | 98 | 0.549369 | false |
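A mounting sketch that follows the pattern quoted in the module's own comments (`root.c2g30 = c2g30.C2G30()`); the `Root` class is a made-up placeholder, and it assumes CherryPy and the `programs.c2g30` package are importable.

```python
# Hypothetical mount; Root is a placeholder, per the comments in the module above.
import cherrypy
import programs.c2g30 as c2g30

class Root(object):
    @cherrypy.expose
    def index(self):
        return "root"

root = Root()
root.c2g30 = c2g30.C2G30()
# cherrypy.quickstart(root)  # then /c2g30/fillpoly, /c2g30/star, ... become reachable
```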
papallas/baxter_cashier | scripts/baxter_cashier_perception/src/camera_calibration.py | 1 | 11217 | #!/usr/bin/python
"""
Image Calibrator tool.
This script implements the Camera Calibrator tool. The purpose of this tool
is to align the camera view relative to Baxter's base.
Since Baxter will be far away from the RGB-D camera, this script will allow the
user through a User Interface to align the two.
Copyright (C) 2016/2017 The University of Leeds and Rafael Papallas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Python specific imports
import argparse
import os
from os.path import isfile
from os.path import join
from os.path import expanduser
# ROS specific imports
import rospy
import tf
# Other imports
import numpy as np
import cv2
class BasicDatabase:
"""Stores the values from the script to a file for later use."""
def __init__(self):
"""
Default constructor.
Default constructor to setup the directory to store and load the
values.
"""
# Path where script will store the files
path = expanduser("~") + "/baxter_cashier_calibrator_files/"
self.file_save_directory = path
# If directory doesn't exist, then create that directory.
if not os.path.exists(self.file_save_directory):
os.makedirs(self.file_save_directory)
def get_available_files(self):
"""
Will return a list of available files.
Returns a list of available files in the directory. This includes only
files and not sub-directories. The usage of this function is to show
to the user a possible number of fils to choose from to load values.
"""
files = [f for f in os.listdir(self.file_save_directory)
if isfile(join(self.file_save_directory, f))]
return files if len(files) > 0 else None
def load_values(self, file_name):
"""
Will load the values from file.
Given a file name, this method will load the values from the file and
will return xyz and rpy values.
"""
# Load file
file_path = join(self.file_save_directory, file_name)
with open(file_path) as f:
content = f.readlines()
# Remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
# Parse xyz and rpy (casting to ints)
xyz = [int(num) for num in content[0:3]] # First three values
rpy = [int(num) for num in content[3:]] # Last three values
return xyz, rpy
def save_values_to_file(self, file_name, xyz, rpy):
"""Will store the xyz and rpy values to file."""
full_file_path = join(self.file_save_directory, file_name)
store_values_file = open(full_file_path, "w")
x, y, z = xyz
store_values_file.write(str(x) + "\n")
store_values_file.write(str(y) + "\n")
store_values_file.write(str(z) + "\n")
r, p, y = rpy
store_values_file.write(str(r) + "\n")
store_values_file.write(str(p) + "\n")
store_values_file.write(str(y) + "\n")
store_values_file.close()
class Calibrator:
"""Calibrator aligns two camera's POV to a single one."""
def __init__(self, base_topic, target_topic, load_from_file=False):
"""
Class constructor that does some important initialisation.
- Creates the required rospy configuration
- Creates the Database instance to load or store values for this script
- Set the class' values to default or loads from file.
- Ask from the user to enter file name to store new values if is
values are not loaded from file.
"""
# Basic rospy configuration
rospy.init_node('camera_calibrator_tf_broadcaster')
self.rate = rospy.Rate(100.0)
# Flag indicating if the script will load values from file or not.
self.load_from_file = load_from_file
# Basic-flat database to store the values of the script.
self.database = BasicDatabase()
# The class' brodcasters
self.broadcaster = tf.TransformBroadcaster()
# Default values
self.quaternion = [0, 0, 0, 0]
self.xyz_transformed = [0, 0, 0]
self.xyz = [0.0, 0.0, 0.0]
self.rpy = [0, 0, 0]
self.file_name = None
# Topics that will be used to broadcast tf
self.base_topic = base_topic
self.target_topic = target_topic
# Load values from file
if self.load_from_file:
# Ask from the user for a file name from a list of possible files,
# and then load the values from that file.
self.file_name = self._get_file_name_from_user()
if self.file_name is not None:
self.xyz, self.rpy = self.database.load_values(self.file_name)
# Since we have load the values from file, we want to calculate the
# quaternion from these values as well as a small tuning on xyz.
self.calculate_values()
# Ask for file name to store the new values for this session
if self.file_name is None or not self.load_from_file:
if self.file_name is None:
print("No files found to load. Starting a new \
configuration...")
# Ask for new file name
message = "Enter a new file name to save configuration: "
self.file_name = raw_input(message)
# OpenCV for window
self.cv2 = cv2
# Initialise the trackbars (sliders) for the CV window
self._create_trackbars_for_window()
def _get_file_name_from_user(self):
"""
Will ask the user for a file to load.
Asks the user to choose a file from a list of possible files found
in the directory. The user should enter a number from that list
starting from zero.
"""
list_of_files = self.database.get_available_files()
if list_of_files is None:
return
print("Available files:")
for i in range(0, len(list_of_files)):
print("{}. {}".format(i, list_of_files[i]))
file_number = int(raw_input("Enter file number from the list: "))
if file_number >= 0 and file_number < len(list_of_files):
return list_of_files[file_number]
return None
def _create_trackbars_for_window(self):
"""
Will create the OpenCV window.
Called only once to initialise and created the OpenCV window with
the trackbars. The values of trackbars will be set to 0 if not loading
from file, or will be set to the last used values if loaded from file.
"""
self.cv2.namedWindow('image')
# Create trackbars
x, y, z = [int(v) for v in self.xyz]
self.cv2.createTrackbar('x', 'image', x, 12000, self._callback)
self.cv2.createTrackbar('y', 'image', y, 12000, self._callback)
self.cv2.createTrackbar('z', 'image', z, 12000, self._callback)
r, p, y = [int(v) for v in self.rpy]
self.cv2.createTrackbar('Roll', 'image', r, 12600, self._callback)
self.cv2.createTrackbar('Pitch', 'image', p, 12600, self._callback)
self.cv2.createTrackbar('Yaw', 'image', y, 12600, self._callback)
def calculate_values(self):
"""
Calculate the new values based on the slider values.
When xyz and rpy values are given by the user, some formulas need to
be applied to get the corrected xyz and the quaternion value.
"""
def apply_formula(value):
return value * np.pi / 1800
# Perform (1000 - 1) to x, y and z using list comprehension
self.xyz_transformed = [v / 1000.0 - 6 for v in self.xyz]
# Using map function we get new values for r, p and y
# based on static formula computed by `apply_formula` function
roll, pitch, yaw = map(apply_formula, self.rpy)
# Using Euler method calculates the quaternion from roll, pitch and yaw
self.quaternion = tf.transformations.quaternion_from_euler(roll,
pitch,
yaw)
def _callback(self, _):
"""
Callback function on slider change.
This callback function is called whenever the trackbars from the OpenCV
window are changed.
"""
# Get xyz and rpy current position from the trackbars
self.xyz = self._extract_xyz_from_trackbars()
self.rpy = self._extract_rpy_from_trackbars()
# Calculate the new values based on the new configuration
self.calculate_values()
# Auto-save new values to file.
self.database.save_values_to_file(self.file_name, self.xyz, self.rpy)
def _extract_xyz_from_trackbars(self):
"""Will extract x, y and z values from CV2 trackbars."""
x = self.cv2.getTrackbarPos('x', 'image')
y = self.cv2.getTrackbarPos('y', 'image')
z = self.cv2.getTrackbarPos('z', 'image')
return [x, y, z]
def _extract_rpy_from_trackbars(self):
"""Will extract r, p and y values from CV2 trackbars."""
r = self.cv2.getTrackbarPos('Roll', 'image')
p = self.cv2.getTrackbarPos('Pitch', 'image')
y = self.cv2.getTrackbarPos('Yaw', 'image')
return [r, p, y]
def calibrate(self):
"""Method performs the basic operation."""
print("Start publishing tf...")
while not rospy.is_shutdown():
_ = self.cv2.waitKey(1) & 0xFF
self.broadcaster.sendTransform(tuple(self.xyz_transformed),
tuple(self.quaternion),
rospy.Time.now(),
self.base_topic,
self.target_topic)
self.rate.sleep()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-l", action="store_true",
help="Load values from file")
args = parser.parse_args()
# Topics to be used to publish the tf.
base_topic = "camera_link"
target_topic = "base"
# Load values from file
if args.l: # l for load
calibrator = Calibrator(base_topic=base_topic,
target_topic=target_topic,
load_from_file=True)
else:
calibrator = Calibrator(base_topic=base_topic,
target_topic=target_topic)
calibrator.calibrate()
| gpl-3.0 | -2,389,012,720,843,534,300 | 34.384858 | 79 | 0.602924 | false |
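A standalone numeric sketch of the slider-to-pose conversion used in `calculate_values` above (v/1000 - 6 for translation, v*pi/1800 for angles); the trackbar values are invented and only the standard library is used.

```python
# Standalone check of the conversion formulas; trackbar values are invented.
import math

sliders_xyz = [6000, 7000, 5500]
sliders_rpy = [0, 900, 1800]

xyz = [v / 1000.0 - 6 for v in sliders_xyz]        # -> [0.0, 1.0, -0.5]
rpy = [v * math.pi / 1800 for v in sliders_rpy]    # -> [0.0, pi/2, pi]

print("xyz: %s" % xyz)
print("rpy: %s" % rpy)
```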
CCrypto/ccvpn3 | lambdainst/management/commands/expire_notify.py | 1 | 2208 | from django.core.management.base import BaseCommand
from datetime import timedelta
from django.db.models import Q, F
from django.conf import settings
from django.utils import timezone
from django.template.loader import get_template
from django.core.mail import send_mass_mail
from constance import config as site_config
from ccvpn.common import parse_integer_list
from lambdainst.models import VPNUser
ROOT_URL = settings.ROOT_URL
SITE_NAME = settings.TICKETS_SITE_NAME
def get_next_expirations(days=3):
""" Gets users whose subscription will expire in some days """
limit_date = timezone.now() + timedelta(days=days)
users = VPNUser.objects.exclude(user__email__exact='')
users = users.filter(expiration__gt=timezone.now()) # Not expired
users = users.filter(expiration__lt=limit_date) # Expire in a few days
    # Make sure we don't send the notice twice
users = users.filter(Q(last_expiry_notice__isnull=True)
| Q(expiration__gt=F('last_expiry_notice')
+ timedelta(days=days)))
return users
class Command(BaseCommand):
help = "Notify users near the end of their subscription"
def handle(self, *args, **options):
from_email = settings.DEFAULT_FROM_EMAIL
for v in parse_integer_list(site_config.NOTIFY_DAYS_BEFORE):
emails = []
qs = get_next_expirations(v)
users = list(qs)
for u in users:
# Ignore users with active subscriptions
# They will get notified only if it gets cancelled (payments
# processors will cancel after a few failed payments)
if u.get_subscription():
continue
ctx = dict(site_name=SITE_NAME, user=u.user,
exp=u.expiration, url=ROOT_URL)
text = get_template('lambdainst/mail_expire_soon.txt').render(ctx)
emails.append(("CCVPN Expiration", text, from_email, [u.user.email]))
self.stdout.write("sending -%d days notify to %s ..." % (v, u.user.email))
send_mass_mail(emails)
qs.update(last_expiry_notice=timezone.now())
| mit | 3,720,305,249,847,602,700 | 35.8 | 90 | 0.636322 | false |
zsiciarz/django-markitup | markitup/templatetags/markitup_tags.py | 3 | 2073 | from __future__ import unicode_literals
from django import template
from django.core.urlresolvers import reverse, NoReverseMatch
from markitup import settings
from markitup.util import absolute_url
from markitup.fields import render_func
register = template.Library()
@register.filter
def render_markup(content):
return render_func(content)
# we do some funny stuff here for testability (the tests need to be
# able to force a recalculation of this context)
def _get_markitup_context():
context = {
'MARKITUP_SET': absolute_url(settings.MARKITUP_SET).rstrip('/'),
'MARKITUP_SKIN': absolute_url(settings.MARKITUP_SKIN).rstrip('/'),
'MARKITUP_JS': absolute_url('markitup/jquery.markitup.js'),
'AJAXCSRF_JS': absolute_url('markitup/ajax_csrf.js'),
}
if settings.JQUERY_URL is not None:
context['JQUERY_URL'] = absolute_url(settings.JQUERY_URL)
return context
register._markitup_context = _get_markitup_context()
@register.inclusion_tag('markitup/include_all.html')
def markitup_media(no_jquery=False):
include_jquery = not bool(no_jquery) and settings.JQUERY_URL is not None
return dict(register._markitup_context, include_jquery=include_jquery)
@register.inclusion_tag('markitup/include_js.html')
def markitup_js(no_jquery=False):
include_jquery = not bool(no_jquery) and settings.JQUERY_URL is not None
return dict(register._markitup_context, include_jquery=include_jquery)
@register.inclusion_tag('markitup/include_css.html')
def markitup_css():
return register._markitup_context
@register.inclusion_tag('markitup/editor.html')
def markitup_editor(textarea_id, auto_preview=None):
if auto_preview is not None:
auto_preview = (auto_preview == 'auto_preview')
else:
auto_preview = settings.MARKITUP_AUTO_PREVIEW
try:
preview_url = reverse('markitup_preview')
except NoReverseMatch:
        preview_url = None
return {'textarea_id': textarea_id,
'AUTO_PREVIEW': auto_preview,
'preview_url': preview_url}
| bsd-3-clause | -7,835,216,432,872,737,000 | 29.485294 | 76 | 0.712012 | false |
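A small rendering sketch for the filter defined above; it assumes a configured Django project with django-markitup installed, since `render_func` is resolved from the project settings.

```python
# Hypothetical usage inside a configured Django project with django-markitup installed.
from markitup.templatetags.markitup_tags import render_markup

html = render_markup("**hello**")  # output depends on the configured MARKITUP_FILTER
print(html)
```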
Dhivyap/ansible | lib/ansible/plugins/inventory/hcloud.py | 12 | 8464 | # Copyright (c) 2019 Hetzner Cloud GmbH <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
name: hcloud
plugin_type: inventory
author:
- Lukas Kaemmerling (@lkaemmerling)
short_description: Ansible dynamic inventory plugin for the Hetzner Cloud.
version_added: "2.8"
requirements:
- python >= 2.7
- hcloud-python >= 1.0.0
description:
- Reads inventories from the Hetzner Cloud API.
- Uses a YAML configuration file that ends with hcloud.(yml|yaml).
extends_documentation_fragment:
- constructed
options:
plugin:
description: marks this as an instance of the "hcloud" plugin
required: true
choices: ["hcloud"]
token:
description: The Hetzner Cloud API Token.
required: true
env:
- name: HCLOUD_TOKEN
connect_with:
description: Connect to the server using the value from this field.
default: public_ipv4
type: str
choices:
- public_ipv4
- hostname
- ipv4_dns_ptr
locations:
description: Populate inventory with instances in this location.
default: []
type: list
required: false
types:
description: Populate inventory with instances with this type.
default: []
type: list
required: false
images:
description: Populate inventory with instances with this image name, only available for system images.
default: []
type: list
required: false
label_selector:
description: Populate inventory with instances with this label.
default: ""
type: str
required: false
"""
EXAMPLES = r"""
# Minimal example. `HCLOUD_TOKEN` is exposed in environment.
plugin: hcloud
# Example with locations, types, groups and token
plugin: hcloud
token: foobar
locations:
- nbg1
types:
- cx11
# Group by a location with prefix e.g. "hcloud_location_nbg1"
# and image_os_flavor without prefix and separator e.g. "ubuntu"
# and status with prefix e.g. "server_status_running"
plugin: hcloud
keyed_groups:
- key: location
prefix: hcloud_location
- key: image_os_flavor
separator: ""
- key: status
prefix: server_status
"""
import os
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.release import __version__
try:
from hcloud import hcloud
except ImportError:
raise AnsibleError("The Hetzner Cloud dynamic inventory plugin requires hcloud-python.")
class InventoryModule(BaseInventoryPlugin, Constructable):
NAME = "hcloud"
def _configure_hcloud_client(self):
self.api_token = self.get_option("token")
if self.api_token is None:
raise AnsibleError(
"Please specify a token, via the option token or via environment variable HCLOUD_TOKEN")
self.endpoint = os.getenv("HCLOUD_ENDPOINT") or "https://api.hetzner.cloud/v1"
self.client = hcloud.Client(token=self.api_token,
api_endpoint=self.endpoint,
application_name="ansible-inventory",
application_version=__version__)
def _test_hcloud_token(self):
try:
# We test the API Token against the location API, because this is the API with the smallest result
# and not controllable from the customer.
self.client.locations.get_all()
except hcloud.APIException:
raise AnsibleError("Invalid Hetzner Cloud API Token.")
def _get_servers(self):
if len(self.get_option("label_selector")) > 0:
self.servers = self.client.servers.get_all(label_selector=self.get_option("label_selector"))
else:
self.servers = self.client.servers.get_all()
def _filter_servers(self):
if self.get_option("locations"):
tmp = []
for server in self.servers:
if server.datacenter.location.name in self.get_option("locations"):
tmp.append(server)
self.servers = tmp
if self.get_option("types"):
tmp = []
for server in self.servers:
if server.server_type.name in self.get_option("types"):
tmp.append(server)
self.servers = tmp
if self.get_option("images"):
tmp = []
for server in self.servers:
if server.image is not None and server.image.os_flavor in self.get_option("images"):
tmp.append(server)
self.servers = tmp
def _set_server_attributes(self, server):
self.inventory.set_variable(server.name, "id", to_native(server.id))
self.inventory.set_variable(server.name, "name", to_native(server.name))
self.inventory.set_variable(server.name, "status", to_native(server.status))
self.inventory.set_variable(server.name, "type", to_native(server.server_type.name))
# Network
self.inventory.set_variable(server.name, "ipv4", to_native(server.public_net.ipv4.ip))
self.inventory.set_variable(server.name, "ipv6_network", to_native(server.public_net.ipv6.network))
self.inventory.set_variable(server.name, "ipv6_network_mask", to_native(server.public_net.ipv6.network_mask))
if self.get_option("connect_with") == "public_ipv4":
self.inventory.set_variable(server.name, "ansible_host", to_native(server.public_net.ipv4.ip))
elif self.get_option("connect_with") == "hostname":
self.inventory.set_variable(server.name, "ansible_host", to_native(server.name))
elif self.get_option("connect_with") == "ipv4_dns_ptr":
self.inventory.set_variable(server.name, "ansible_host", to_native(server.public_net.ipv4.dns_ptr))
# Server Type
self.inventory.set_variable(server.name, "server_type", to_native(server.image.name))
# Datacenter
self.inventory.set_variable(server.name, "datacenter", to_native(server.datacenter.name))
self.inventory.set_variable(server.name, "location", to_native(server.datacenter.location.name))
# Image
self.inventory.set_variable(server.name, "image_id", to_native(server.image.id))
self.inventory.set_variable(server.name, "image_name", to_native(server.image.name))
self.inventory.set_variable(server.name, "image_os_flavor", to_native(server.image.os_flavor))
# Labels
self.inventory.set_variable(server.name, "labels", dict(server.labels))
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith((self.NAME + ".yaml", self.NAME + ".yml"))
)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._configure_hcloud_client()
self._test_hcloud_token()
self._get_servers()
self._filter_servers()
# Add a top group 'hcloud'
self.inventory.add_group(group="hcloud")
for server in self.servers:
self.inventory.add_host(server.name, group="hcloud")
self._set_server_attributes(server)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), {}, server.name, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), {}, server.name, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), {}, server.name, strict=strict)
| gpl-3.0 | 7,025,890,395,361,384,000 | 38.004608 | 117 | 0.626772 | false |
jmchilton/galaxy-central | tools/emboss/emboss_format_corrector.py | 1 | 1696 | #EMBOSS format corrector
import operator
from galaxy import datatypes
#Properly set file formats after job run
def exec_after_process( app, inp_data, out_data, param_dict,tool, stdout, stderr):
#Properly set file formats before job run
#def exec_before_job(trans, inp_data, out_data, param_dict,tool):
#why isn't items an ordered list?
items = out_data.items()
#lets sort it ourselves....
items = sorted(items, key=operator.itemgetter(0))
#items is now sorted...
#normal filetype correction
data_count=1
for name, data in items:
outputType = param_dict.get( 'out_format'+str(data_count), None )
#print "data_count",data_count, "name", name, "outputType", outputType
if outputType !=None:
if outputType == 'ncbi':
outputType = "fasta"
elif outputType == 'excel':
outputType = "Tabular"
elif outputType == 'text':
outputType = "txt"
data = datatypes.change_datatype(data, outputType)
data.flush()
data_count+=1
#html filetype correction
data_count=1
for name, data in items:
wants_plot = param_dict.get( 'html_out'+str(data_count), None )
ext = "html"
if wants_plot == "yes":
data = datatypes.change_datatype(data, ext)
data.flush()
data_count+=1
#png file correction
data_count=1
for name, data in items:
wants_plot = param_dict.get( 'plot'+str(data_count), None )
ext = "png"
if wants_plot == "yes":
data = datatypes.change_datatype(data, ext)
data.flush()
data_count+=1
| mit | 4,169,446,580,304,199,000 | 32.92 | 82 | 0.590802 | false |
fzimmermann89/pyload | module/plugins/accounts/MegaRapidoNet.py | 1 | 5619 | # -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.MultiAccount import MultiAccount
class MegaRapidoNet(MultiAccount):
__name__ = "MegaRapidoNet"
__type__ = "account"
__version__ = "0.07"
__status__ = "testing"
__config__ = [("mh_mode" , "all;listed;unlisted", "Filter hosters to use" , "all"),
("mh_list" , "str" , "Hoster list (comma separated)", "" ),
("mh_interval", "int" , "Reload interval in minutes" , 60 )]
__description__ = """MegaRapido.net account plugin"""
__license__ = "GPLv3"
__authors__ = [("Kagenoshin", "[email protected]")]
VALID_UNTIL_PATTERN = r'<\s*?div[^>]*?class\s*?=\s*?[\'"]premium_index[\'"].*?>[^<]*?<[^>]*?b.*?>\s*?TEMPO\s*?PREMIUM.*?<[^>]*?/b.*?>\s*?(\d*)[^\d]*?DIAS[^\d]*?(\d*)[^\d]*?HORAS[^\d]*?(\d*)[^\d]*?MINUTOS[^\d]*?(\d*)[^\d]*?SEGUNDOS'
USER_ID_PATTERN = r'<\s*?div[^>]*?class\s*?=\s*?["\']checkbox_compartilhar["\'].*?>.*?<\s*?input[^>]*?name\s*?=\s*?["\']usar["\'].*?>.*?<\s*?input[^>]*?name\s*?=\s*?["\']user["\'][^>]*?value\s*?=\s*?["\'](.*?)\s*?["\']'
def grab_hosters(self, user, password, data):
        hosters = {'1fichier'    : [],  #: left empty; there are too many possible addresses to list them all
'1st-files' : ['1st-files.com'],
'2shared' : ['2shared.com'],
'4shared' : ['4shared.com', '4shared-china.com'],
'asfile' : ['http://asfile.com/'],
'bitshare' : ['bitshare.com'],
'brupload' : ['brupload.net'],
'crocko' : ['crocko.com', 'easy-share.com'],
'dailymotion' : ['dailymotion.com'],
'depfile' : ['depfile.com'],
'depositfiles': ['depositfiles.com', 'dfiles.eu'],
'dizzcloud' : ['dizzcloud.com'],
'dl.dropbox' : [],
'extabit' : ['extabit.com'],
'extmatrix' : ['extmatrix.com'],
'facebook' : [],
'file4go' : ['file4go.com'],
'filecloud' : ['filecloud.io', 'ifile.it', 'mihd.net'],
'filefactory' : ['filefactory.com'],
'fileom' : ['fileom.com'],
'fileparadox' : ['fileparadox.in'],
'filepost' : ['filepost.com', 'fp.io'],
'filerio' : ['filerio.in', 'filerio.com', 'filekeen.com'],
'filesflash' : ['filesflash.com'],
'firedrive' : ['firedrive.com', 'putlocker.com'],
'flashx' : [],
'freakshare' : ['freakshare.net', 'freakshare.com'],
'gigasize' : ['gigasize.com'],
'hipfile' : ['hipfile.com'],
'junocloud' : ['junocloud.me'],
'letitbit' : ['letitbit.net', 'shareflare.net'],
'mediafire' : ['mediafire.com'],
'mega' : ['mega.co.nz'],
'megashares' : ['megashares.com'],
'metacafe' : ['metacafe.com'],
'netload' : ['netload.in'],
'oboom' : ['oboom.com'],
'rapidgator' : ['rapidgator.net'],
'rapidshare' : ['rapidshare.com'],
'rarefile' : ['rarefile.net'],
'ryushare' : ['ryushare.com'],
'sendspace' : ['sendspace.com'],
'turbobit' : ['turbobit.net', 'unextfiles.com'],
'uploadable' : ['uploadable.ch'],
'uploadbaz' : ['uploadbaz.com'],
'uploaded' : ['uploaded.to', 'uploaded.net', 'ul.to'],
'uploadhero' : ['uploadhero.com'],
'uploading' : ['uploading.com'],
'uptobox' : ['uptobox.com'],
'xvideos' : ['xvideos.com'],
'youtube' : ['youtube.com']}
hoster_list = []
for item in hosters.values():
hoster_list.extend(item)
return hoster_list
def grab_info(self, user, password, data):
validuntil = None
trafficleft = None
premium = False
html = self.load("http://megarapido.net/gerador")
validuntil = re.search(self.VALID_UNTIL_PATTERN, html)
if validuntil:
            #: Continue here!!! (we absolutely have to get the time handling right for this! (should be possible though))
validuntil = time.time() + int(validuntil.group(1)) * 24 * 3600 + int(validuntil.group(2)) * 3600 + int(validuntil.group(3)) * 60 + int(validuntil.group(4))
trafficleft = -1
premium = True
return {'validuntil' : validuntil,
'trafficleft': trafficleft,
'premium' : premium}
def signin(self, user, password, data):
self.load("http://megarapido.net/login")
self.load("http://megarapido.net/painel_user/ajax/logar.php",
post={'login': user,
'senha': password})
html = self.load("http://megarapido.net/gerador")
if "sair" not in html.lower():
self.fail_login()
else:
m = re.search(self.USER_ID_PATTERN, html)
if m is not None:
data['uid'] = m.group(1)
else:
self.fail_login("Couldn't find the user ID")
| gpl-3.0 | 3,575,877,202,620,310,000 | 44.666667 | 235 | 0.445434 | false |
CingHu/neutron-ustack | neutron/tests/unit/vmware/nsxlib/test_router.py | 21 | 46835 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from oslo.config import cfg
from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit.vmware.nsxlib import base
_uuid = test_api_v2._uuid
class TestNatRules(base.NsxlibTestCase):
def _test_create_lrouter_dnat_rule(self, version):
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
tenant_id = 'pippo'
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
'fake_router',
'192.168.0.1')
nat_rule = routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '10.0.0.99',
match_criteria={'destination_ip_addresses':
'192.168.0.5'})
uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE,
nat_rule['uuid'],
lrouter['uuid'])
resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster)
self.assertEqual('DestinationNatRule', resp_obj['type'])
self.assertEqual('192.168.0.5',
resp_obj['match']['destination_ip_addresses'])
def test_create_lrouter_dnat_rule_v2(self):
self._test_create_lrouter_dnat_rule('2.9')
def test_create_lrouter_dnat_rule_v31(self):
self._test_create_lrouter_dnat_rule('3.1')
class TestExplicitLRouters(base.NsxlibTestCase):
def setUp(self):
self.fake_version = '3.2'
super(TestExplicitLRouters, self).setUp()
def _get_lrouter(self, tenant_id, router_name, router_id, relations=None):
schema = '/ws.v1/schema/RoutingTableRoutingConfig'
router = {'display_name': router_name,
'uuid': router_id,
'tags': utils.get_tags(os_tid=tenant_id),
'distributed': False,
'routing_config': {'type': 'RoutingTableRoutingConfig',
'_schema': schema},
'_schema': schema,
'nat_synchronization_enabled': True,
'replication_mode': 'service',
'type': 'LogicalRouterConfig',
'_href': '/ws.v1/lrouter/%s' % router_id, }
if relations:
router['_relations'] = relations
return router
def _get_single_route(self, router_id, route_id='fake_route_id_0',
prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'):
return {'protocol': 'static',
'_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id),
'prefix': prefix,
'_schema': '/ws.v1/schema/RoutingTableEntry',
'next_hop_ip': next_hop_ip,
'action': 'accept',
'uuid': route_id}
def test_prepare_body_with_implicit_routing_config(self):
router_name = 'fake_router_name'
tenant_id = 'fake_tenant_id'
neutron_router_id = 'pipita_higuain'
router_type = 'SingleDefaultRouteImplicitRoutingConfig'
route_config = {
'default_route_next_hop': {'gateway_ip_address': 'fake_address',
'type': 'RouterNextHop'}, }
body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
tenant_id, router_type,
**route_config)
expected = {'display_name': 'fake_router_name',
'routing_config': {
'default_route_next_hop':
{'gateway_ip_address': 'fake_address',
'type': 'RouterNextHop'},
'type': 'SingleDefaultRouteImplicitRoutingConfig'},
'tags': utils.get_tags(os_tid='fake_tenant_id',
q_router_id='pipita_higuain'),
'type': 'LogicalRouterConfig',
'replication_mode': cfg.CONF.NSX.replication_mode}
self.assertEqual(expected, body)
def test_prepare_body_without_routing_config(self):
router_name = 'fake_router_name'
tenant_id = 'fake_tenant_id'
neutron_router_id = 'marekiaro_hamsik'
router_type = 'RoutingTableRoutingConfig'
body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
tenant_id, router_type)
expected = {'display_name': 'fake_router_name',
'routing_config': {'type': 'RoutingTableRoutingConfig'},
'tags': utils.get_tags(os_tid='fake_tenant_id',
q_router_id='marekiaro_hamsik'),
'type': 'LogicalRouterConfig',
'replication_mode': cfg.CONF.NSX.replication_mode}
self.assertEqual(expected, body)
def test_get_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
relations = {
'LogicalRouterStatus':
{'_href': '/ws.v1/lrouter/%s/status' % router_id,
'lport_admin_up_count': 1,
'_schema': '/ws.v1/schema/LogicalRouterStatus',
'lport_count': 1,
'fabric_status': True,
'type': 'LogicalRouterStatus',
'lport_link_up_count': 0, }, }
with mock.patch.object(nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id,
relations)):
lrouter = routerlib.get_lrouter(self.fake_cluster, router_id)
self.assertTrue(
lrouter['_relations']['LogicalRouterStatus']['fabric_status'])
def test_create_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
nexthop_ip = '10.0.0.1'
with mock.patch.object(
nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id)):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
router_name, nexthop_ip)
self.assertEqual(lrouter['routing_config']['type'],
'RoutingTableRoutingConfig')
self.assertNotIn('default_route_next_hop',
lrouter['routing_config'])
def test_update_lrouter_with_no_routes(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id)]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
return_value='fake_uuid'):
old_routes = routerlib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
self.assertEqual(old_routes, nsx_routes)
def test_update_lrouter_with_no_routes_raise_nsx_exception(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id)]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
side_effect=api_exc.NsxApiException):
self.assertRaises(api_exc.NsxApiException,
routerlib.update_explicit_routes_lrouter,
self.fake_cluster, router_id, new_routes)
def test_update_lrouter_with_routes(self):
router_id = 'fake_router_id'
new_routes = [{"next_hop_ip": "10.0.0.2",
"prefix": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id),
self._get_single_route(router_id, 'fake_route_id_1',
'0.0.0.1/24', '10.0.0.3'),
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
return_value=None):
with mock.patch.object(routerlib,
'create_explicit_route_lrouter',
return_value='fake_uuid'):
old_routes = routerlib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
self.assertEqual(old_routes, nsx_routes)
def test_update_lrouter_with_routes_raises_nsx_expception(self):
router_id = 'fake_router_id'
new_routes = [{"nexthop": "10.0.0.2",
"destination": "169.254.169.0/30"}, ]
nsx_routes = [self._get_single_route(router_id),
self._get_single_route(router_id, 'fake_route_id_1',
'0.0.0.1/24', '10.0.0.3'),
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
return_value=nsx_routes):
with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
side_effect=api_exc.NsxApiException):
with mock.patch.object(
routerlib, 'create_explicit_route_lrouter',
return_value='fake_uuid'):
self.assertRaises(
api_exc.NsxApiException,
routerlib.update_explicit_routes_lrouter,
self.fake_cluster, router_id, new_routes)
class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase):
def test_create_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.create_lrouter,
self.fake_cluster,
uuidutils.generate_uuid(),
'pluto',
'fake_router',
'my_hop')
def test_delete_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.delete_lrouter,
self.fake_cluster,
'fake_router')
def test_get_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.get_lrouter,
self.fake_cluster,
'fake_router')
def test_update_lrouter_on_failure(self):
self.assertRaises(api_exc.NsxApiException,
routerlib.update_lrouter,
self.fake_cluster,
'fake_router',
'pluto',
'new_hop')
class TestLogicalRouters(base.NsxlibTestCase):
def _verify_lrouter(self, res_lrouter,
expected_uuid,
expected_display_name,
expected_nexthop,
expected_tenant_id,
expected_neutron_id=None,
expected_distributed=None):
self.assertEqual(res_lrouter['uuid'], expected_uuid)
nexthop = (res_lrouter['routing_config']
['default_route_next_hop']['gateway_ip_address'])
self.assertEqual(nexthop, expected_nexthop)
router_tags = self._build_tag_dict(res_lrouter['tags'])
self.assertIn('os_tid', router_tags)
self.assertEqual(res_lrouter['display_name'], expected_display_name)
self.assertEqual(expected_tenant_id, router_tags['os_tid'])
if expected_distributed is not None:
self.assertEqual(expected_distributed,
res_lrouter['distributed'])
if expected_neutron_id:
self.assertIn('q_router_id', router_tags)
self.assertEqual(expected_neutron_id, router_tags['q_router_id'])
def test_get_lrouters(self):
lrouter_uuids = [routerlib.create_lrouter(
self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k,
'10.0.0.1')['uuid'] for k in range(3)]
routers = routerlib.get_lrouters(self.fake_cluster, 'pippo')
for router in routers:
self.assertIn(router['uuid'], lrouter_uuids)
def _create_lrouter(self, version, neutron_id=None, distributed=None):
with mock.patch.object(
self.fake_cluster.api_client, 'get_version',
return_value=version_module.Version(version)):
if not neutron_id:
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(
self.fake_cluster, neutron_id, 'pippo',
'fake-lrouter', '10.0.0.1', distributed=distributed)
return routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
def test_create_and_get_lrouter_v30(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_create_and_get_lrouter_v31_centralized(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
distributed=False)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id,
expected_distributed=False)
def test_create_and_get_lrouter_v31_distributed(self):
neutron_id = uuidutils.generate_uuid()
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
distributed=True)
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
'fake-lrouter', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id,
expected_distributed=True)
def test_create_and_get_lrouter_name_exceeds_40chars(self):
neutron_id = uuidutils.generate_uuid()
display_name = '*' * 50
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
display_name,
'10.0.0.1')
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'*' * 40, '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def _test_version_dependent_update_lrouter(self, version):
def foo(*args, **kwargs):
return version
foo_func_dict = {
'update_lrouter': {
2: {-1: foo},
3: {-1: foo, 2: foo}
}
}
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
return_value=version_module.Version(version)):
with mock.patch.dict(routerlib.ROUTER_FUNC_DICT,
foo_func_dict, clear=True):
return routerlib.update_lrouter(
self.fake_cluster, 'foo_router_id', 'foo_router_name',
'foo_nexthop', routes={'foo_destination': 'foo_address'})
def test_version_dependent_update_lrouter_old_versions(self):
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"2.9")
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"3.0")
self.assertRaises(nsx_exc.InvalidVersion,
self._test_version_dependent_update_lrouter,
"3.1")
def test_version_dependent_update_lrouter_new_versions(self):
self.assertEqual("3.2",
self._test_version_dependent_update_lrouter("3.2"))
self.assertEqual("4.0",
self._test_version_dependent_update_lrouter("4.0"))
self.assertEqual("4.1",
self._test_version_dependent_update_lrouter("4.1"))
def test_update_lrouter_no_nexthop(self):
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter = routerlib.update_lrouter(self.fake_cluster,
lrouter['uuid'],
'new_name',
None)
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'new_name', '10.0.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_update_lrouter(self):
neutron_id = uuidutils.generate_uuid()
lrouter = routerlib.create_lrouter(self.fake_cluster,
neutron_id,
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter = routerlib.update_lrouter(self.fake_cluster,
lrouter['uuid'],
'new_name',
'192.168.0.1')
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
lrouter['uuid'])
self._verify_lrouter(res_lrouter, lrouter['uuid'],
'new_name', '192.168.0.1', 'pippo',
expected_neutron_id=neutron_id)
def test_update_nonexistent_lrouter_raises(self):
self.assertRaises(exceptions.NotFound,
routerlib.update_lrouter,
self.fake_cluster,
'whatever',
'foo', '9.9.9.9')
def test_delete_lrouter(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid'])
self.assertRaises(exceptions.NotFound,
routerlib.get_lrouter,
self.fake_cluster,
lrouter['uuid'])
def test_query_lrouter_ports(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
router_port_uuids = [routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo',
'qp_id_%s' % k, 'port-%s' % k, True,
['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid']
for k in range(3)]
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], router_port_uuids)
def test_query_lrouter_lports_nonexistent_lrouter_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.create_router_lport,
self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def test_create_and_get_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
port_tags = self._build_tag_dict(res_port['tags'])
self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertEqual('pippo', port_tags['os_tid'])
self.assertEqual('neutron_port_id', port_tags['q_port_id'])
def test_create_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.create_router_lport,
self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def test_update_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_router_lport(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
'pippo', 'another_port_id', 'name', False,
['192.168.0.1', '10.10.10.254'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
port_tags = self._build_tag_dict(res_port['tags'])
self.assertEqual(['192.168.0.1', '10.10.10.254'],
res_port['ip_addresses'])
self.assertEqual('False', res_port['admin_status_enabled'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertEqual('pippo', port_tags['os_tid'])
self.assertEqual('another_port_id', port_tags['q_port_id'])
def test_update_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(
exceptions.NotFound, routerlib.update_router_lport,
self.fake_cluster, 'boo-router', 'boo-port', 'pippo',
'neutron_port_id', 'name', True, ['192.168.0.1'])
def test_update_lrouter_port_nonexistent_port_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
self.assertRaises(
exceptions.NotFound, routerlib.update_router_lport,
self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo',
'neutron_port_id', 'name', True, ['192.168.0.1'])
def test_delete_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
'00:11:22:33:44:55')
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertFalse(len(ports))
def test_delete_lrouter_port_nonexistent_router_raises(self):
self.assertRaises(exceptions.NotFound,
routerlib.delete_router_lport,
self.fake_cluster, 'xyz', 'abc')
def test_delete_lrouter_port_nonexistent_port_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
self.assertRaises(exceptions.NotFound,
routerlib.delete_router_lport,
self.fake_cluster, lrouter['uuid'], 'abc')
def test_delete_peer_lrouter_port(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
'00:11:22:33:44:55')
def fakegetport(*args, **kwargs):
return {'_relations': {'LogicalPortAttachment':
{'peer_port_uuid': lrouter_port['uuid']}}}
# mock get_port
with mock.patch.object(switchlib, 'get_port', new=fakegetport):
routerlib.delete_peer_router_lport(self.fake_cluster,
lrouter_port['uuid'],
'whatwever', 'whatever')
def test_update_lrouter_port_ips_add_only(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
['10.10.10.254'], [])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['10.10.10.254', '192.168.0.1'],
res_port['ip_addresses'])
def test_update_lrouter_port_ips_remove_only(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1', '10.10.10.254'],
'00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
[], ['10.10.10.254'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
def test_update_lrouter_port_ips_add_and_remove(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
routerlib.update_lrouter_port_ips(
self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
['10.10.10.254'], ['192.168.0.1'])
ports = routerlib.query_lrouter_lports(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(ports), 1)
res_port = ports[0]
self.assertEqual(['10.10.10.254'], res_port['ip_addresses'])
def test_update_lrouter_port_ips_nonexistent_router_raises(self):
self.assertRaises(
nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
self.fake_cluster, 'boo-router', 'boo-port', [], [])
def test_update_lrouter_port_ips_nsx_exception_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
def raise_nsx_exc(*args, **kwargs):
raise api_exc.NsxApiException()
with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
self.assertRaises(
nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'], [], [])
def test_plug_lrouter_port_patch_attachment(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id, 'fake-switch',
transport_zones_config)
lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, 'xyz',
'name', 'device_id', True)
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
lport['uuid'], 'PatchAttachment')
self.assertEqual(lport['uuid'],
result['LogicalPortAttachment']['peer_port_uuid'])
def test_plug_lrouter_port_l3_gw_attachment(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
'gw_att', 'L3GatewayAttachment')
self.assertEqual(
'gw_att',
result['LogicalPortAttachment']['l3_gateway_service_uuid'])
def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
result = routerlib.plug_router_port_attachment(
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'],
'gw_att', 'L3GatewayAttachment', 123)
self.assertEqual(
'gw_att',
result['LogicalPortAttachment']['l3_gateway_service_uuid'])
self.assertEqual(
'123',
result['LogicalPortAttachment']['vlan_id'])
def test_plug_lrouter_port_invalid_attachment_type_raises(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
lrouter_port = routerlib.create_router_lport(
self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
self.assertRaises(nsx_exc.InvalidAttachmentType,
routerlib.plug_router_port_attachment,
self.fake_cluster, lrouter['uuid'],
lrouter_port['uuid'], 'gw_att', 'BadType')
def _test_create_router_snat_rule(self, version):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=200,
match_criteria={'source_ip_addresses': '192.168.0.24'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 1)
def test_create_router_snat_rule_v3(self):
self._test_create_router_snat_rule('3.0')
def test_create_router_snat_rule_v2(self):
self._test_create_router_snat_rule('2.0')
def _test_create_router_dnat_rule(self, version, dest_port=None):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
return_value=version_module.Version(version)):
routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
dest_port=dest_port,
match_criteria={'destination_ip_addresses': '10.0.0.3'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 1)
def test_create_router_dnat_rule_v3(self):
self._test_create_router_dnat_rule('3.0')
def test_create_router_dnat_rule_v2(self):
self._test_create_router_dnat_rule('2.0')
def test_create_router_dnat_rule_v2_with_destination_port(self):
self._test_create_router_dnat_rule('2.0', 8080)
def test_create_router_dnat_rule_v3_with_destination_port(self):
self._test_create_router_dnat_rule('3.0', 8080)
def test_create_router_snat_rule_invalid_match_keys_raises(self):
# In this case the version does not make a difference
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: '2.0'):
self.assertRaises(AttributeError,
routerlib.create_lrouter_snat_rule,
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=200,
match_criteria={'foo': 'bar'})
def _test_create_router_nosnat_rule(self, version, expected=1):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version(version)):
routerlib.create_lrouter_nosnat_rule(
self.fake_cluster, lrouter['uuid'],
order=100,
match_criteria={'destination_ip_addresses': '192.168.0.0/24'})
rules = routerlib.query_nat_rules(
self.fake_cluster, lrouter['uuid'])
# NoSNAT rules do not exist in V2
self.assertEqual(len(rules), expected)
def test_create_router_nosnat_rule_v2(self):
self._test_create_router_nosnat_rule('2.0', expected=0)
def test_create_router_nosnat_rule_v3(self):
self._test_create_router_nosnat_rule('3.0')
def _prepare_nat_rules_for_delete_tests(self):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
'pippo',
'fake-lrouter',
'10.0.0.1')
# v2 or v3 makes no difference for this test
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version('2.0')):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=220,
match_criteria={'source_ip_addresses': '192.168.0.0/24'})
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.3', '10.0.0.3', order=200,
match_criteria={'source_ip_addresses': '192.168.0.2/32'})
routerlib.create_lrouter_dnat_rule(
self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
match_criteria={'destination_ip_addresses': '10.0.0.3'})
return lrouter
def test_delete_router_nat_rules_by_match_on_destination_ip(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1,
destination_ip_addresses='10.0.0.3')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 2)
def test_delete_router_nat_rules_by_match_on_source_ip(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1,
source_ip_addresses='192.168.0.2/32')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 2)
def test_delete_router_nat_rules_by_match_no_match_expected(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0)
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0,
destination_ip_addresses='99.99.99.99')
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
def test_delete_router_nat_rules_by_match_no_match_raises(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
self.assertRaises(
nsx_exc.NatRuleMismatch,
routerlib.delete_nat_rules_by_match,
self.fake_cluster, lrouter['uuid'],
'SomeWeirdType', 1, 1)
def test_delete_nat_rules_by_match_len_mismatch_does_not_raise(self):
lrouter = self._prepare_nat_rules_for_delete_tests()
rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
self.assertEqual(len(rules), 3)
deleted_rules = routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'],
'DestinationNatRule',
max_num_expected=1, min_num_expected=1,
raise_on_len_mismatch=False,
destination_ip_addresses='99.99.99.99')
self.assertEqual(0, deleted_rules)
# add an extra rule to emulate a duplicate one
with mock.patch.object(self.fake_cluster.api_client,
'get_version',
new=lambda: version_module.Version('2.0')):
routerlib.create_lrouter_snat_rule(
self.fake_cluster, lrouter['uuid'],
'10.0.0.2', '10.0.0.2', order=220,
match_criteria={'source_ip_addresses': '192.168.0.0/24'})
deleted_rules_2 = routerlib.delete_nat_rules_by_match(
self.fake_cluster, lrouter['uuid'], 'SourceNatRule',
min_num_expected=1, max_num_expected=1,
raise_on_len_mismatch=False,
source_ip_addresses='192.168.0.0/24')
self.assertEqual(2, deleted_rules_2)
| apache-2.0 | 4,142,239,347,746,218,500 | 48.404008 | 79 | 0.50221 | false |
pombredanne/rekall | rekall-core/rekall/plugins/common/entity/efilter_protocols.py | 4 | 5229 | # Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
The Rekall Entity Layer.
"""
__author__ = "Adam Sindelar <[email protected]>"
from efilter.protocols import associative
from efilter.protocols import indexable
from efilter.protocols import hashable
from efilter.protocols import name_delegate
from rekall import obj
from rekall import plugin as rekall_plugin
from rekall.entities import entity
from rekall.entities import identity
from rekall.entities import component as entity_component
class RekallDelegate(object):
"""Exposes the global Rekall namespace and types to EFILTER.
This is a work in progress.
"""
def __init__(self, session, profile):
self.session = session
self.profile = profile
def _reflect_global(self, name):
if name in entity_component.Component.classes.keys():
# Is this a valid component? If so, just tell EFILTER it's an
# Entity.
return entity_component.Component.classes.get(name)
elif name.startswith("_"):
# Could be a global.
value = self.profile.get_constant(name)
if value:
return type(value)
return None
else:
# Try a plugin name.
# If name is in session plugins, return the plugin class.
return None
def _reflect_scoped(self, name, scope):
if issubclass(scope, entity_component.Component):
return self._reflect_component(name, scope)
if issubclass(scope, obj.BaseObject):
return self._reflect_vtype(name, scope)
if issubclass(scope, rekall_plugin.Plugin):
return self._reflect_plugin(name, scope)
return None
def _reflect_component(self, name, component):
field = component.reflect_attribute(name)
if not field:
return None
return getattr(field.typedesc, "type_cls", None)
def _reflect_vtype(self, name, vtype):
pass
def _reflect_plugin(self, name, plugin):
pass
def reflect(self, name, scope=None):
if scope is None:
return self._reflect_global(name)
return self._reflect_scoped(name, scope)
def provide(self, name):
return None
def getnames(self):
return ()
name_delegate.INameDelegate.implement(
for_type=RekallDelegate,
implementations={
name_delegate.reflect: RekallDelegate.reflect,
name_delegate.provide: RekallDelegate.provide,
name_delegate.getnames: RekallDelegate.getnames})
def _getkeys_Entity(e):
for component_name in entity_component.Component.classes.keys():
component = getattr(e.components, component_name)
if component is None:
continue
for idx, field in enumerate(component.component_fields):
if component[idx]:
yield "%s/%s" % (component_name, field.name)
### Entity-related types: ###
associative.IAssociative.implement(
for_type=entity.Entity,
implementations={
associative.select: lambda e, key: e.get_raw(key),
associative.resolve: lambda e, key: e.get(key),
associative.getkeys: _getkeys_Entity})
associative.IAssociative.implement(
for_type=entity_component.Component,
implementations={
associative.select: lambda c, key: c[key],
associative.resolve: lambda c, key: c[key],
associative.getkeys: lambda c: (f.name for f in c.component_fields)})
associative.IAssociative.implement(
for_type=entity.CurriedComponent,
implementations={
associative.select: entity.CurriedComponent.get,
associative.resolve: entity.CurriedComponent.get_raw,
associative.getkeys:
lambda c: (f.name for f in c.component.component_fields)})
indexable.IIndexable.implement(
for_types=(identity.Identity, entity.Entity),
implementations={
indexable.indices: lambda x: x.indices})
### Structs/vtypes: ###
associative.IAssociative.implement(
for_type=obj.Struct,
implementations={
associative.select: lambda o, key: o.m(key),
associative.resolve: lambda o, key: getattr(o, key, None),
associative.getkeys: lambda o: o.members.iterkeys()})
indexable.IIndexable.implement(
for_type=obj.Struct,
implementations={
indexable.indices: lambda o: o.indices})
hashable.IHashable.implement(
for_type=obj.BaseObject,
implementations={
hashable.hashed: hash})
| gpl-2.0 | -8,311,667,224,154,146,000 | 29.225434 | 77 | 0.673551 | false |
pycket/pycket | pycket/ast_vs_sexp.py | 1 | 35364 | from pycket import interpreter as interp
from pycket import values, values_string, vector, util, values_regex
from pycket.prims.correlated import W_Correlated
from pycket.error import SchemeException
from pycket.hash import simple, equal, base
from pycket.assign_convert import assign_convert
from pycket.util import PerfRegion
mksym = values.W_Symbol.make
def to_rpython_list(r_list, unwrap_correlated=False, reverse=False, improper=False):
    # Converts a Racket-level list into an RPython list together with its length.
    # Assumes r_list is a proper list unless improper=True is passed.
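    # Illustration (hypothetical values, meant only as a sketch of the contract):
    # a proper list built with values.to_list([a, b, c]) converts to ([a, b, c], 3),
    # reverse=True hands the elements back in reverse order, and improper=True
    # keeps a dotted tail as the final element of the resulting RPython list.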
length = 0
acc = r_list
while(acc is not values.w_null):
length += 1
if improper:
length += 1
break
acc = acc.cdr()
acc = r_list
py_ls = [None]*length
out = False
for n in range(length):
a = None
if improper and not isinstance(acc, values.W_List):
a = acc.get_obj() if (unwrap_correlated and isinstance(acc, W_Correlated)) else acc
out = True
else:
a = acc.car().get_obj() if (unwrap_correlated and isinstance(acc.car(), W_Correlated)) else acc.car()
if reverse:
py_ls[length-n-1] = a
else:
py_ls[n] = a
if out:
break
acc = acc.cdr()
return py_ls, length
dir_sym = mksym(":D:")
bundle_sym = mksym(":B:")
linklet_sym = mksym("linklet")
import_sym = mksym("Import")
export_sym = mksym("Export")
def ast_to_sexp(form):
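    # Serialize a Pycket AST node (or a linklet / bundle / directory) into an
    # s-expression: plain values, symbols, vectors, hashes and lists are returned
    # unchanged, linklets are flattened into the (linklet ...) shape described
    # below, bundles and directories become a pair of the :B:/:D: marker symbol
    # and their mapping, and every other AST node delegates to its to_sexp().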
from pycket.prims.linklet import W_Linklet, W_LinkletBundle, W_LinkletDirectory
#util.console_log("ast->sexp is called with form : %s" % form.tostring(), 8)
if is_val_type(form, extra=[vector.W_Vector, base.W_HashTable, values.W_List, values.W_Symbol]):
return form
elif isinstance(form, W_Linklet):
name = form.name # W_Symbol
importss = form.importss # [[Import ...] ...]
exports = form.exports # {int_id:Export ...}
body_forms = form.forms # rlist of ASTs
# The AST contains gensymed references to all the variables
# the linklet closes on, so we need to serialize all the
# information we have in Import and Export objects to recreate
# the same gensymed bindings at the instantiation of the
# deserialized linklet.
# So it will look like this when serialized:
#
# (linklet name (((Import grp gen_id int_id ext_id) ...) ...) ((Export int_id gen_int_id ext_id) ...) body)
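        #
        # A hypothetical instance of that shape (purely illustrative; the real
        # identifiers are gensymed):
        #
        # (linklet lnkl (((Import 0 g1 x x))) ((Export y g2 y)) (define-values (y) x))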
importss_rlist = [None]*len(importss)
for index, imp_group in enumerate(importss):
len_group = len(imp_group)
importss_inst = [None]*len_group
for i, imp_obj in enumerate(imp_group):
importss_inst[i] = values.to_list([import_sym, imp_obj.group, imp_obj.id, imp_obj.int_id, imp_obj.ext_id])
importss_rlist[index] = values.to_list(importss_inst)
importss_list = values.to_list(importss_rlist)
exports_rlist = [None]*len(exports)
i = 0
for k, exp_obj in exports.iteritems():
exports_rlist[i] = values.to_list([export_sym, k, exp_obj.int_id, exp_obj.ext_id])
i += 1
exports_list = values.to_list(exports_rlist)
body_forms_rlist = [None]*len(body_forms)
for index, ast_form in enumerate(body_forms):
body_forms_rlist[index] = ast_form.to_sexp()
linklet_rlist = [linklet_sym, name, importss_list, exports_list] + body_forms_rlist
linklet_s_exp = values.to_list(linklet_rlist)
return linklet_s_exp
elif isinstance(form, W_LinkletBundle) or isinstance(form, W_LinkletDirectory):
bd_sym = None
if isinstance(form, W_LinkletBundle):
bd_sym = mksym(":B:")
else:
bd_sym = mksym(":D:")
mapping = form.get_mapping()
l = mapping.length()
keys = [None]*l
vals = [None]*l
if isinstance(mapping, equal.W_EqualHashTable):
i = 0
for k, v in mapping.hash_items():
keys[i] = k
vals[i] = ast_to_sexp(v)
i += 1
return values.W_Cons.make(bd_sym, equal.W_EqualHashTable(keys, vals, immutable=True))
elif isinstance(mapping, simple.W_EqImmutableHashTable):
i = 0
for k, v in mapping.iteritems():
keys[i] = k
vals[i] = ast_to_sexp(v)
i += 1
return values.W_Cons.make(bd_sym, simple.make_simple_immutable_table(simple.W_EqImmutableHashTable, keys, vals))
else:
raise SchemeException("Something wrong with the bundle/directory mapping : %s" % mapping.tostring())
else:
return form.to_sexp()
def def_vals_to_ast(def_vals_sexp, exports, all_toplevels, linkl_imports, mutated_ids):
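    # Expects a three-element s-expression of the shape
    #   (define-values (id ...) rhs-expr)
    # and turns it into an interp.DefineValues node.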
ls, ln = to_rpython_list(def_vals_sexp)
if not ln == 3:
raise SchemeException("defs_vals_to_ast : unhandled define-values form : %s" % def_vals_sexp.tostring())
names = ls[1] # def_vals_sexp.cdr().car()
names_ls, names_ln = to_rpython_list(names, unwrap_correlated=True)
the_name = names_ls[0].variable_name() if names_ln > 0 else ""
body = sexp_to_ast(ls[2], [], exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[], name=the_name)
return interp.DefineValues(names_ls, body, names_ls)
def lam_to_ast(lam_sexp, lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref, name=""):
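    # Accepts either (lambda formals body) or a bare (formals body) clause as
    # produced for case-lambda; formals may be a proper list (x y), a dotted
    # list (x y . rest) or a single rest symbol.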
from pycket.expand import SourceInfo
lam_sexp_elements, l = to_rpython_list(lam_sexp)
if not (l == 3 or l == 2):
raise SchemeException("lam_to_ast : unhandled lambda form : %s" % lam_sexp.tostring())
if lam_sexp.car() is mksym("lambda"):
lam_sexp = lam_sexp.cdr()
formals_ = lam_sexp.car()
rest = None
formals_ls = []
formals_len = 0
if isinstance(formals_, values.W_Symbol):
# check for a "rest"
rest = formals_
lex_env.append(rest)
else:
# two passes over the formals
# 1) determine the rest arg and the number of formal args
while (formals_ is not values.w_null):
if isinstance(formals_, values.W_Symbol):
rest = formals_
lex_env.append(formals_)
break
elif formals_.car() is mksym("."):
# another check for a "rest"
if formals_.cdr() is values.w_null:
raise SchemeException("lam_to_ast : invalid lambda form : %s" % lam_sexp.tostring())
rest = formals_.cdr().car()
lex_env.append(rest)
break
formals_len += 1
formals_ = formals_.cdr()
# 2) make the r_list for formals
formals_ls = [None]*formals_len
formals_ = lam_sexp.car() # reset
index = 0
while isinstance(formals_, values.W_Cons) and formals_.car() is not mksym("."):
formals_ls[index] = formals_.car()
index += 1
formals_ = formals_.cdr()
body = sexp_to_ast(lam_sexp.cdr().car(), formals_ls + lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[], name=name)
dummy = 1
return interp.make_lambda(formals_ls, rest, [body], SourceInfo(dummy, dummy, dummy, dummy, name))
def let_like_to_ast(let_sexp, lex_env, exports, all_toplevels, linkl_imports, mutated_ids, is_letrec, cell_ref):
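    # Handles (let-values (((id ...) rhs) ...) body ...) and its letrec-values
    # variant; with an empty binding clause the body collapses into a Begin.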
let_ls, let_len = to_rpython_list(let_sexp)
# just a sanity check
if not (let_ls[0] is mksym("let-values") or (let_ls[0] is mksym("letrec-values") and is_letrec)):
raise SchemeException("let_to_ast : unhandled let form : %s" % let_sexp.tostring())
varss_rhss, varss_len = to_rpython_list(let_ls[1])
if is_letrec:
# populate lex_env
for rhs in varss_rhss: # rhs : ((id ...) rhs-expr)
ids, ids_len = to_rpython_list(rhs.car(), unwrap_correlated=True) # (id ...)
lex_env += ids
varss_list = [None] * varss_len
rhss_list = [None] * varss_len
num_ids = 0
i = 0
for w_vars_rhss in varss_rhss:
varr, varr_len = to_rpython_list(w_vars_rhss.car(), unwrap_correlated=True)
varss_list[i] = varr
rhsr = sexp_to_ast(w_vars_rhss.cdr().car(), lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[])
rhss_list[i] = rhsr
i += 1
num_ids += varr_len
ids = [None] * num_ids
index = 0
for vars_ in varss_list:
for var_ in vars_:
ids[index] = var_ # W_Symbol
index += 1
let_body_ls = let_ls[2:]
body_ls = [None]*(let_len-2)
for index, b in enumerate(let_body_ls):
body_ls[index] = sexp_to_ast(b, ids + lex_env, exports, all_toplevels, linkl_imports, mutated_ids, cell_ref=[])
if varss_len == 0:
return interp.Begin.make(body_ls)
if is_letrec:
return interp.make_letrec(varss_list, rhss_list, body_ls)
else:
return interp.make_let(varss_list, rhss_list, body_ls)
def is_val_type(form, extra=[]):
val_types = [values.W_Number,
values.W_Void,
values.W_Bool,
values_string.W_String,
values.W_ImmutableBytes,
values.W_Character] + extra
for t in val_types:
if isinstance(form, t):
return True
return False
def is_imported(id_sym, linkl_importss):
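    # Returns the gensymed linklet-variable id if id_sym is bound by one of the
    # import groups, otherwise None.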
for imp_index, imports_group in enumerate(linkl_importss):
for imp in imports_group:
if id_sym is imp.int_id:
return imp.id
return None
begin_sym = mksym("begin")
begin0_sym = mksym("begin0")
def_val_sym = mksym("define-values")
wcm_sym = mksym("with-continuation-mark")
variable_ref_sym = mksym("#%variable-reference")
caselam_sym = mksym("case-lambda")
lam_sym = mksym("lambda")
let_sym = mksym("let-values")
letrec_sym = mksym("letrec-values")
set_bang_sym = mksym("set!")
quote_sym = mksym("quote")
if_sym = mksym("if")
var_ref_sym = mksym("variable-ref")
var_ref_no_check_sym = mksym("variable-ref/no-check")
var_set_check_undef_sym = mksym("variable-set!/check-undefined")
var_set_sym = mksym("variable-set!")
var_prim_syms = [var_ref_sym, var_ref_no_check_sym, var_set_check_undef_sym, var_set_sym]
var_ref_mod_var = interp.ModuleVar(var_ref_sym, "#%kernel", var_ref_sym, None)
var_ref_no_check_mod_var = interp.ModuleVar(var_ref_no_check_sym, "#%kernel", var_ref_no_check_sym, None)
var_set_check_undef_mod_var = interp.ModuleVar(var_set_check_undef_sym, "#%kernel", var_set_check_undef_sym, None)
var_set_mod_var = interp.ModuleVar(var_set_sym, "#%kernel", var_set_sym, None)
known_mod_vars = {} # cache for kernel primitive ModuleVars
def sexp_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref=[], name=""):
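    # Rebuild a Pycket AST from a serialized s-expression: symbols are resolved
    # against cell references, the lexical environment, linklet exports/imports
    # and toplevels before falling back to #%kernel module variables, and the
    # special forms (begin, define-values, lambda, let-values, set!, if, ...)
    # are dispatched on the head symbol.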
#util.console_log("sexp->ast is called with form : %s" % form.tostring(), 8)
if isinstance(form, W_Correlated):
return sexp_to_ast(form.get_obj(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
elif is_val_type(form):
return interp.Quote(form)
elif isinstance(form, values.W_Symbol):
if form in cell_ref:
return interp.CellRef(form)
if form in lex_env:
return interp.LexicalVar(form)
if form in exports and (form in mutated_ids or form not in all_toplevels):
# dynamically find the W_LinkletVar for the exported variable
# possible point of optimization
rands = [interp.LinkletVar(exports[form].int_id)]
return interp.App.make(var_ref_mod_var, rands)
if form in all_toplevels:
return interp.ToplevelVar(form, is_free=False)
import_var_int_id = is_imported(form, linkl_importss)
if import_var_int_id: # this is gensymed internal variable name
# dynamically find the W_LinkletVar for the imported variable
# possible point of optimization
rands = [interp.LinkletVar(import_var_int_id)]
return interp.App.make(var_ref_no_check_mod_var, rands)
# kernel primitive ModuleVar
if form in known_mod_vars:
return known_mod_vars[form]
m_var = interp.ModuleVar(form, "#%kernel", form, None)
known_mod_vars[form] = m_var
return m_var
elif isinstance(form, values.W_List):
c = form.car()
### these are for the desearialization of the linklet body
if c in var_prim_syms:
linklet_var_sym = form.cdr().car()
rator, rands = None, None
if c is var_set_sym or c is var_set_check_undef_sym:
rator = var_set_mod_var if c is var_set_sym else var_set_check_undef_mod_var
linklet_var = interp.LinkletVar(linklet_var_sym)
new_val = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
mode = interp.Quote(values.w_false) # FIXME: possible optimization
rands = [linklet_var, new_val, mode]
return interp.App.make(rator, rands)
if c is var_ref_sym or c is var_ref_no_check_sym:
rator = var_ref_mod_var if c is var_ref_sym else var_ref_no_check_mod_var
rands = [interp.LinkletVar(linklet_var_sym)]
return interp.App.make(rator, rands)
###
if c is begin_sym:
begin_exprs, ln = to_rpython_list(form.cdr())
return interp.Begin.make([sexp_to_ast(f, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for f in begin_exprs])
elif c is begin0_sym:
fst = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
rst_exprs, rest_len = to_rpython_list(form.cdr().cdr())
rst = [sexp_to_ast(f, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for f in rst_exprs]
if rest_len == 0:
return fst
else:
return interp.Begin0.make(fst, rst)
elif c is def_val_sym:
return def_vals_to_ast(form, exports, all_toplevels, linkl_importss, mutated_ids)
elif c is wcm_sym:
from pycket.prims.general import elidable_length
if elidable_length(form) != 4:
raise SchemeException("Unrecognized with-continuation-mark form : %s" % form.tostring())
key = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
val = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
body = sexp_to_ast(form.cdr().cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
return interp.WithContinuationMark(key, val, body)
elif c is variable_ref_sym:
if form.cdr() is values.w_null: # (variable-reference)
return interp.VariableReference(None, None)
elif form.cdr().cdr() is values.w_null: # (variable-reference id)
if isinstance(form.cdr().car(), values.W_Symbol):
var = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
return interp.VariableReference(var, "dummy-path.rkt") # FIXME
elif isinstance(form.cdr().car(), values.W_Fixnum):
# because we're 'writing' variable-reference with is_mutable information
is_mut = False
if form.cdr().car().toint() != 0:
is_mut = True
return interp.VariableReference(None, None, is_mut)
else:
raise SchemeException("Invalid variable-reference form : %s -- arg type : %s" % (form.tostring(), form.cdr().car()))
elif form.cdr().cdr().cdr() is values.w_null: # (variable-reference 1 2)
raise SchemeException("Unhandled variable-reference form : %s" % (form.tostring()))
else:
# This is to handle varrefs serialized by Pycket
# no Racket varref has more than 1 argument
var_ = form.cdr().car()
path_ = form.cdr().cdr().car()
mut_ = form.cdr().cdr().cdr().car()
var = None
path = None
mut = False
if var_ is not values.w_false:
var = sexp_to_ast(var_, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
if isinstance(path_, values.W_Object) and path_ is not values.w_false:
path = path_.tostring()
elif isinstance(path_, str):
path = path_
if mut_ is values.w_true:
mut = True
return interp.VariableReference(var, path, mut)
elif c is caselam_sym:
maybe_rec_sym_part = values.w_null
if form.cdr() is not values.w_null:
maybe_rec_sym_part = form.cdr().car() # (recursive-sym <sym>)
rec_sym = None
new_lex_env = lex_env
lams_part = form.cdr()
if isinstance(maybe_rec_sym_part, values.W_Cons) and maybe_rec_sym_part is not values.w_null:
if maybe_rec_sym_part.car() is mksym("recursive-sym"):
# then we're reading a caselam that we wrote
lams_part = form.cdr().cdr()
if maybe_rec_sym_part.cdr() is not values.w_null:
rec_sym = maybe_rec_sym_part.cdr().car()
new_lex_env = lex_env + [rec_sym]
lams_expr, ln = to_rpython_list(lams_part)
lams = [lam_to_ast(f, new_lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for f in lams_expr]
return interp.CaseLambda(lams, rec_sym)
elif c is lam_sym:
return interp.CaseLambda([lam_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)])
elif c is let_sym:
return let_like_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, False, cell_ref)
elif c is letrec_sym:
return let_like_to_ast(form, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, True, cell_ref)
elif c is set_bang_sym:
import_id = is_imported(form.cdr().car(), linkl_importss)
if import_id:
raise SchemeException("cannot mutate imported variable : %s" % form.tostring())
cr = cell_ref
target = form.cdr().car()
rhs = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
# if it's for an exported variable, don't emit a set!
# we're going to variable-set! the exported variable
if target in exports:
rator = var_set_check_undef_mod_var
mode = interp.Quote(values.w_false) # FIXME: possible optimization
rands = [interp.LinkletVar(exports[target].int_id), rhs, mode]
return interp.App.make(rator, rands)
if target in lex_env:
cr = [target] if not cr else [target] + cr
var = sexp_to_ast(form.cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref=cr, name=name)
rhs = sexp_to_ast(form.cdr().cdr().car(), lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
assert isinstance(var, interp.Var)
return interp.SetBang(var, rhs)
elif c is quote_sym:
if form.cdr() is values.w_null or form.cdr().cdr() is not values.w_null:
raise SchemeException("malformed quote form : %s" % form.tostring())
return interp.Quote(form.cdr().car())
elif c is if_sym:
tst_w = form.cdr().car()
thn_w = form.cdr().cdr().car()
els_w = form.cdr().cdr().cdr().car()
tst = sexp_to_ast(tst_w, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
thn = sexp_to_ast(thn_w, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
els = sexp_to_ast(els_w, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name)
return interp.If.make(tst, thn, els)
else:
form_rator = sexp_to_ast(c, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref)
rands_ls, rands_len = to_rpython_list(form.cdr())
rands = [sexp_to_ast(r, lex_env, exports, all_toplevels, linkl_importss, mutated_ids, cell_ref, name) for r in rands_ls]
return interp.App.make(form_rator, rands)
else:
raise SchemeException("Don't know what to do with this form yet : %s" % form.tostring())
def looks_like_linklet(sexp):
# (linklet () () ...)
# we know the sexp is not w_null
if not isinstance(sexp, values.W_Cons):
return False
if sexp.car() is not linklet_sym:
return False
if not isinstance(sexp.cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr(), values.W_Cons):
return False
maybe_name = sexp.cdr().car()
named = isinstance(maybe_name, values.W_Symbol)
if named and not isinstance(sexp.cdr().cdr().cdr(), values.W_Cons):
return False
rest = sexp.cdr() if (not named) else sexp.cdr().cdr()
# check the imports/exports
_imports = rest.car()
_exports = rest.cdr().car()
# FIXME : also check the imports and exports' inner structures
if not isinstance(_imports, values.W_List) or not isinstance(_exports, values.W_List):
return False
return True
class Import(object):
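    # A single linklet import: 'group' is the index of the enclosing import
    # group, 'id' is the gensymmed symbol used internally, and 'int_id'/'ext_id'
    # are the internal/external names (see get_imports_from_w_importss_sexp).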
def __init__(self, group, id, int_id, ext_id):
self.group = group
self.id = id
self.int_id = int_id
self.ext_id = ext_id
def get_imports_from_w_importss_sexp(w_importss):
from pycket.interpreter import Gensym
importss_acc, importss_len = to_rpython_list(w_importss)
importss_list = [None]*importss_len
for index, importss_current in enumerate(importss_acc):
importss_group_ls, group_len = to_rpython_list(importss_current)
inner_acc = [None]*group_len
for i, c in enumerate(importss_group_ls):
if isinstance(c, values.W_Symbol):
w_imp_sym = Gensym.gensym(c.tostring())
inner_acc[i] = Import(values.W_Fixnum(index), w_imp_sym, c, c)
elif isinstance(c, values.W_List):
if c.cdr().cdr() is not values.w_null:
raise SchemeException("Unhandled renamed import form : %s" % c.tostring())
external_id = c.car().get_obj() if isinstance(c.car(), W_Correlated) else c.car()
internal_id = c.cdr().car().get_obj() if isinstance(c.cdr().car(), W_Correlated) else c.cdr().car()
w_internal_id = Gensym.gensym(internal_id.tostring())
inner_acc[i] = Import(values.W_Fixnum(index), w_internal_id, internal_id, external_id)
elif isinstance(c, W_Correlated):
cc = c.get_obj()
w_cc = Gensym.gensym(cc.tostring())
inner_acc[i] = Import(values.W_Fixnum(index), w_cc, cc, cc)
else:
raise SchemeException("uncrecognized import : %s" % c.tostring())
importss_list[index] = inner_acc
return importss_list
class Export(object):
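    # A single linklet export: 'int_id' is the gensymmed internal symbol,
    # 'ext_id' is the external name (kept as-is, not gensymmed).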
def __init__(self, int_gensym, ext_id):
self.int_id = int_gensym
self.ext_id = ext_id
def get_exports_from_w_exports_sexp(w_exports):
from pycket.interpreter import Gensym
r_exports, exports_len = to_rpython_list(w_exports)
exports = {}
for i, exp in enumerate(r_exports):
if isinstance(exp, values.W_WrappedConsProper):
car = exp.car()
internal_name = car.get_obj() if isinstance(car, W_Correlated) else car
cadr = exp.cdr().car()
external_name = cadr.get_obj() if isinstance(cadr, W_Correlated) else cadr
w_internal_name = Gensym.gensym(internal_name.tostring())
# don't gensym the external_id
exports[internal_name] = Export(w_internal_name, external_name)
else:
c_exp = exp.get_obj() if isinstance(exp, W_Correlated) else exp
w_c_exp = Gensym.gensym(c_exp.tostring())
exports[c_exp] = Export(w_c_exp, c_exp)
return exports
# collect the ids in define-values forms
def create_toplevel_linklet_vars(forms_ls, linklet):
linkl_toplevels = {} # {W_Symbol:LinkletVar}
for form in forms_ls:
if isinstance(form, W_Correlated):
form = form.get_obj()
if isinstance(form, values.W_List) and form.car() is mksym("define-values"):
ids = form.cdr().car()
ids_ls, ids_len = to_rpython_list(ids, unwrap_correlated=True)
# create LinkletVar for each id
for id in ids_ls:
if id in linkl_toplevels:
raise SchemeException("duplicate binding name : %s" % id.tostring())
linkl_toplevels[id] = interp.LinkletDefinedVar(id, defining_linklet=linklet)
return linkl_toplevels
# collect the ids in define-values forms
def get_toplevel_defined_ids(forms_ls):
linkl_toplevels = {} # {W_Symbol:None}
for form in forms_ls:
if isinstance(form, W_Correlated):
form = form.get_obj()
if isinstance(form, values.W_List) and form.car() is mksym("define-values"):
ids = form.cdr().car()
ids_ls, ids_len = to_rpython_list(ids, unwrap_correlated=True)
            # record each defined id (maps to None; only the key set matters)
for id in ids_ls:
if id in linkl_toplevels:
raise SchemeException("duplicate binding name : %s" % id.tostring())
linkl_toplevels[id] = None
return linkl_toplevels
def extend_dict(a, b):
for k,v in b.iteritems():
a[k] = v
return a
def extend_dicts(list_of_dicts):
a = {}
for d in list_of_dicts:
a = extend_dict(a, d)
return a
def find_mutated(form):
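    # Recursively collect every id that appears as the target of a set!
    # anywhere in the form; returns a dict {W_Symbol: None}.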
if isinstance(form, W_Correlated):
return find_mutated(form.get_obj())
elif isinstance(form, values.W_Cons):
if not form.is_proper_list():
elements, _ = to_rpython_list(form, unwrap_correlated=True, improper=True)
return extend_dicts([find_mutated(f) for f in elements])
c = form.car()
if c is set_bang_sym:
return extend_dict({form.cdr().car():None}, find_mutated(form.cdr().cdr().car()))
elif isinstance(c, values.W_Cons) and c is not values.w_null:
all_exprs, _ = to_rpython_list(form, unwrap_correlated=True)
return extend_dicts([find_mutated(f) for f in all_exprs])
else:
rest_exprs, _ = to_rpython_list(form.cdr(), unwrap_correlated=True)
return extend_dicts([find_mutated(f) for f in rest_exprs])
else:
return {}
def process_w_body_sexp(w_body, importss_list, exports, from_zo=False):
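    # Convert the body s-expressions into (normalized, assign-converted) ASTs
    # and append a variable-set! call for every exported defined id.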
body_forms_ls, body_length = to_rpython_list(w_body, unwrap_correlated=True)
cur_toplevels = {}
# make a recursive (!arbitrarily deep!) pass to find set!ed ids
mutated = find_mutated(w_body) # {W_Symbol:None}
# another pass to find toplevel defined ids
all_toplevels = get_toplevel_defined_ids(body_forms_ls)
variable_set_lines = 0
for d in all_toplevels:
if d in exports:
variable_set_lines += 1
# for each exported defined id, we need to add a variable-set! for
# the exported var with the defined id
total_forms_len = body_length + variable_set_lines
body_forms = [None]*(total_forms_len)
added = 0
current_index = 0
# this juggling is because we don't know how many extra ast forms
# we're going to add for the exported defined ids
for b in body_forms_ls:
b_form = sexp_to_ast(b, [], exports, all_toplevels, importss_list, mutated)
        if not from_zo: # no need to normalize if it's already normalized
with PerfRegion("compile-normalize"):
b_form = interp.Context.normalize_term(b_form)
with PerfRegion("compile-assign-convert"):
b_form = assign_convert(b_form)
body_forms[current_index+added] = b_form
current_index += 1
if isinstance(b_form, interp.DefineValues):
for n in b_form.names:
if n in exports:
rator = interp.ModuleVar(var_set_sym, "#%kernel", var_set_sym, None)
exp_var = interp.LinkletVar(exports[n].int_id)
top_var = interp.ToplevelVar(n, is_free=False)
mode = interp.Quote(values.w_false) # FIXME: possible optimization
rands = [exp_var, top_var, mode]
body_forms[current_index+added] = interp.App.make(rator,rands)
added += 1
return body_forms
def looks_like_an_import(sexp):
# should be (Import grp gen_id int_id ext_id)
if not isinstance(sexp, values.W_Cons):
return False
if sexp.car() is not import_sym:
return False
if not isinstance(sexp.cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr().cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr().cdr().cdr(), values.W_Cons):
return False
return True
# We can't use the same thing with what compile-linklet uses anymore,
# because what we serialize is now specific to Pycket (contains some
# extra info than a regular linklet s-expr that the expander would
# pass)
def deserialize_importss(w_importss):
importss_acc, importss_len = to_rpython_list(w_importss)
importss_list = [None]*importss_len
for index, importss_current in enumerate(importss_acc):
importss_group_ls, group_len = to_rpython_list(importss_current)
inner_acc = [None]*group_len
for i, c in enumerate(importss_group_ls):
if looks_like_an_import(c):
w_grp_index = c.cdr().car()
id = c.cdr().cdr().car()
int_id = c.cdr().cdr().cdr().car()
ext_id = c.cdr().cdr().cdr().cdr().car()
inner_acc[i] = Import(w_grp_index, id, int_id, ext_id)
else:
raise SchemeException("looks like an invalid serialization of import : %s" % c.tostring())
importss_list[index] = inner_acc
return importss_list
def looks_like_an_export(sexp):
    # should be (Export int_id gen_int_id ext_id)
if not isinstance(sexp, values.W_Cons):
return False
if sexp.car() is not export_sym:
return False
if not isinstance(sexp.cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr(), values.W_Cons):
return False
if not isinstance(sexp.cdr().cdr().cdr(), values.W_Cons):
return False
return True
# See the comment for deserialize_importss
def deserialize_exports(w_exports):
r_exports, exports_len = to_rpython_list(w_exports)
exports = {}
for i, exp in enumerate(r_exports):
if looks_like_an_export(exp):
k = exp.cdr().car()
gen_int_id = exp.cdr().cdr().car()
ext_id = exp.cdr().cdr().cdr().car()
exports[k] = Export(gen_int_id, ext_id)
else:
raise SchemeException("looks like an invalid serialization of export : %s" % exp.tostring())
return exports
def deserialize_loop(sexp):
from pycket.prims.linklet import W_Linklet, W_LinkletBundle, W_LinkletDirectory
from pycket.env import w_global_config
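    # Rebuild linklet directories, bundles and linklets -- as well as any
    # nested lists, hash tables and vectors -- from their serialized s-exprs.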
if isinstance(sexp, values.W_Cons):
c = sexp.car()
if c is dir_sym:
dir_map = sexp.cdr()
return W_LinkletDirectory(deserialize_loop(dir_map))
elif c is bundle_sym:
bundle_map = sexp.cdr()
return W_LinkletBundle(deserialize_loop(bundle_map))
elif looks_like_linklet(sexp):
# Unify this with compile_linklet
if isinstance(sexp.cdr().car(), values.W_List):
w_name = mksym("anonymous")
w_importss = sexp.cdr().car()
w_exports = sexp.cdr().cdr().car()
w_body = sexp.cdr().cdr().cdr()
else:
w_name = sexp.cdr().car()
w_importss = sexp.cdr().cdr().car()
w_exports = sexp.cdr().cdr().cdr().car()
w_body = sexp.cdr().cdr().cdr().cdr()
importss_list = deserialize_importss(w_importss)
# Process the exports
exports = deserialize_exports(w_exports)
# Process the body
with PerfRegion("compile-sexp-to-ast"):
body_forms = process_w_body_sexp(w_body, importss_list, exports, from_zo=True)
return W_Linklet(w_name, importss_list, exports, body_forms)
else:
# get the length
ls = sexp
length = 0
is_improper = False
while ls is not values.w_null:
if isinstance(ls, values.W_Cons):
length += 1
ls = ls.cdr()
else:
is_improper = True
ls = values.w_null
# allocate an r_list (to avoid reversing w_list)
if is_improper:
sexp_ls = [None]*(length+1)
else:
sexp_ls = [None]*length
# second pass, get the elements
ls = sexp
for i in range(length-1, -1, -1):
sexp_ls[i] = ls.car()
ls = ls.cdr()
if is_improper:
sexp_ls[length] = ls
# make the new list
new = values.w_null
for s in sexp_ls:
new = values.W_Cons.make(deserialize_loop(s), new)
return new
elif isinstance(sexp, simple.W_EqImmutableHashTable):
l = sexp.length()
keys = [None]*l
vals = [None]*l
i = 0
for k, v in sexp.iteritems():
keys[i] = k
vals[i] = deserialize_loop(v)
i += 1
return simple.make_simple_immutable_table(simple.W_EqImmutableHashTable, keys, vals)
elif isinstance(sexp, equal.W_EqualHashTable):
l = sexp.length()
keys = [None]*l
vals = [None]*l
i = 0
for k, v in sexp.hash_items():
keys[i] = k
vals[i] = deserialize_loop(v)
i += 1
return equal.W_EqualHashTable(keys, vals, immutable=True)
elif isinstance(sexp, vector.W_Vector):
new = [None]*sexp.length()
items = sexp.get_strategy().ref_all(sexp)
for index, obj in enumerate(items):
new[index] = deserialize_loop(obj)
return vector.W_Vector.fromelements(new, sexp.immutable())
else:
return sexp
| mit | 2,687,355,948,987,064,000 | 41.865455 | 153 | 0.585002 | false |
0x08e/SENginx | 3rd-party/naxsi/nx_util/nx_lib/nx_imports.py | 3 | 20172 | import urlparse
import string
import itertools
import datetime
import time
import pprint
import gzip
import bz2
import glob
import logging
import sys
from select import select
import re
class NxImportFilter():
""" Used to handle user supplied input filters on data acquisition """
def __init__(self, filters):
self.gi = None
self.res_op = []
self.kw = {
"ip" : {"methods" : "=,!=,=~"},
"date" : {"methods" : "=,!=,=~,>,<,>=,<=",
"match_method" : self.date_cmp},
"server" : {"methods" : "=,!=,=~"},
"uri" : {"methods" : "=,!=,=~"},
"zone" : {"methods" : "=,!="},
"id" : {"methods" : "=,!=,>,<,>=,<=",
"match_method" : self.int_cmp},
"var_name" : {"methods" : "=,!=,=~"},
"content" : {"methods" : "=,!=,=~"},
"country" : {"methods" : "=,!="}
}
try:
import GeoIP
self.gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)
except:
logging.warning("""Python's GeoIP module is not present.
'World Map' reports won't work,
and you can't use per-country filters.""")
# returns an integer less than, equal to or greater than zero
# if date1 is < date2, date1 == date2 or date1 > date2
def date_cmp(self, date1, date2):
d1s = time.strptime(date1, "%Y-%m-%d %H:%M:%S")
d2s = time.strptime(date2, "%Y-%m-%d %H:%M:%S")
        if d1s > d2s:
            return 1
        if d1s == d2s:
            return 0
        if d1s < d2s:
            return -1
def int_cmp(self, date1, date2):
int1 = int(date1)
int2 = int(date2)
if int1 > int2:
return 1
if int1 == int2:
return 0
if int1 < int2:
return -1
def word(self, w, res):
if w not in self.kw.keys():
return -1
res.append(w)
return 1
def check(self, w, res):
if w not in self.kw[res[-1]]["methods"].split(","):
logging.critical("operator "+w+" not allowed for var "+res[-1])
return -1
res.append(w)
return 2
def checkval(self, w, res):
        # for checks on date, we allow a special trick :
# lastweek => now() - 7days
# lastmonth = now() - 1 month
if res[-2] == "date":
if w == "lastweek":
mnow = time.gmtime(time.time() - (60*60*24*7))
w = time.strftime("%Y-%m-%d %H:%M:%S", mnow)
if w == "lastmonth":
mnow = time.gmtime(time.time() - (60*60*24*30))
w = time.strftime("%Y-%m-%d %H:%M:%S", mnow)
if w == "lastday":
mnow = time.gmtime(time.time() - (60*60*24))
w = time.strftime("%Y-%m-%d %H:%M:%S", mnow)
if w == "lasthour":
mnow = time.gmtime(time.time() - (60*60))
w = time.strftime("%Y-%m-%d %H:%M:%S", mnow)
res.append(w)
return 3
def synt(self, w, res):
if w != "or" and w != "and":
return -1
res.append(w)
return 0
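    # A filter string is a sequence of "field operator value" triples joined
    # by and/or; quoted values may contain spaces. Hypothetical example:
    #   ip = 1.2.3.4 and date >= lastweek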
def filter_build(self, instr):
words = instr.split(' ')
res_op = []
# -1 : err, 0 : var, 1 : check, 2 : syntax (and/or), 3 : value, 4 : in quoted string
state = 0
tmp_word = ""
for w in words:
# wut, quoted string ?
            # and YES, no \ handling, booh
if w.startswith("'") or w.startswith('"'):
tmp_word = w[1:]
state = 4
continue
if state == 0:
state = self.word(w, res_op)
elif state == 1:
state = self.check(w, res_op)
elif state == 2:
state = self.checkval(w, res_op)
elif state == 3:
state = self.synt(w, res_op)
elif state == 4:
if w.endswith("'") or w.endswith('"'):
tmp_word = tmp_word + " " + w[:-1]
state = self.checkval(tmp_word, res_op)
else:
tmp_word = tmp_word + " " +w
if state == -1:
logging.critical("Unable to build filter, check you syntax at '"+w+"'")
return False
self.res_op = res_op
return True
def subfil(self, src, sub):
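        # Apply one (field, operator, value) triple to the event dict 'src';
        # returns True when the event satisfies it.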
if sub[0] not in src:
logging.critical("Unable to filter : key "+sub[0]+" does not exist in dict")
return False
srcval = src[sub[0]]
filval = sub[2]
if sub[1] == "=" and srcval == filval:
return True
elif sub[1] == "!=" and srcval != filval:
return True
elif sub[1] == "=~" and re.match(filval, srcval):
return True
elif sub[1].startswith(">") or sub[1].startswith("<"):
if sub[0] not in self.kw or "match_method" not in self.kw[sub[0]]:
logging.critical("Unable to apply operator </>/<=/>= without method")
logging.critical(pprint.pformat(self.kw[sub[0]]))
return False
# if date1 is < date2, date1 == date2 or date1 > date2
if sub[1] == ">" or sub[1] == ">=":
if self.kw[sub[0]]["match_method"](srcval, filval) == 1:
#print srcval+">="+filval
return True
if sub[1] == "<" or sub[1] == "<=":
if self.kw[sub[0]]["match_method"](srcval, filval) == -1:
#print srcval+"<="+filval
return True
if sub[1] == ">=" or sub[1] == "<=":
if self.kw[sub[0]]["match_method"](srcval, filval) == 0:
return True
return False
return False
def dofilter(self, src):
filters = self.res_op
if self.gi is not None:
src['country'] = self.gi.country_code_by_addr(src['ip'])
else:
logging.debug("Unable to GeoIP lookup ip "+src['ip'])
src['country'] = "??"
last = False
ok_fail = False
while last is False:
sub = filters[0:3]
filters = filters[3:]
if len(filters) == 0:
last = True
result = self.subfil(src, sub)
# Final check
if last is True:
# if last keyword was or, we can have a fail on last test
# and still return true.
if ok_fail is True:
return True
return result
            # if this test succeeds with an OR, we can fail the next one.
if result is True and filters[0] == "or":
return True
if result is False and filters[0] == "or":
ok_fail = True
filters = filters[1:]
continue
if result is False and filters[0] == "and":
return False
# remove and/or
filters = filters[1:]
return True
class NxReader():
""" Feeds the given injector from logfiles """
def __init__(self, injector, stdin=False, lglob=[], step=50000,
stdin_timeout=5, date_filters=[["", ""]]):
self.injector = injector
self.step = step
self.files = []
self.date_filters = date_filters
self.timeout = stdin_timeout
self.stdin = False
if stdin is not False:
logging.warning("Using stdin")
self.stdin = True
return
if len(lglob) > 0:
for regex in lglob:
self.files.extend(glob.glob(regex))
logging.warning("List of files :"+str(self.files))
def read_stdin(self):
rlist, _, _ = select([sys.stdin], [], [], self.timeout)
success = discard = not_nx = malformed = 0
if rlist:
s = sys.stdin.readline()
if s == '':
return False
self.injector.acquire_nxline(s)
return True
else:
return False
def read_files(self):
if self.stdin is True:
ret = ""
while self.read_stdin() is True:
pass
self.injector.commit()
logging.info("Committing to db ...")
self.injector.wrapper.StopInsert()
return 0
count = 0
total = 0
for lfile in self.files:
success = not_nx = discard = malformed = fragmented = reunited = 0
logging.info("Importing file "+lfile)
try:
if lfile.endswith(".gz"):
fd = gzip.open(lfile, "rb")
elif lfile.endswith(".bz2"):
fd = bz2.BZ2File(lfile, "r")
else:
fd = open(lfile, "r")
except:
logging.critical("Unable to open file : "+lfile)
return 1
for line in fd:
ret = self.injector.acquire_nxline(line)
success += ret[0]
discard += ret[1]
malformed += ret[2]
fragmented = ret[3]
reunited = ret[4]
count += ret[0]
if count >= self.step:
self.injector.commit()
count = 0
fd.close()
logging.info("Successful events :"+str(success))
logging.info("Filtered out events :"+str(discard))
logging.info("Non-naxsi lines :"+str(not_nx))
logging.info("Malformed lines :"+str(malformed))
logging.info("Incomplete lines :"+str(fragmented))
logging.info("Reunited lines :"+str(reunited))
total += success
if count > 0:
self.injector.commit()
logging.info("End of db commit... ")
self.injector.wrapper.StopInsert()
logging.info("Count (lines) success:"+str(total))
return 0
class NxInject():
""" Transforms naxsi error log into dicts """
# din_fmt and fil_fmt are format of dates from logs and from user-supplied filters
def __init__(self, wrapper, filters=""):
self.naxsi_keywords = [" NAXSI_FMT: ", " NAXSI_EXLOG: "]
self.wrapper = wrapper
self.dict_buf = []
self.total_objs = 0
self.total_commits = 0
self.filters = filters
self.filt_engine = None
self.multiline_buf = {}
self.fragmented_lines = 0
self.reunited_lines = 0
if self.filters is not None:
self.filt_engine = NxImportFilter(self.filters)
if self.filt_engine.filter_build(self.filters) is False:
logging.critical("Unable to create filter, abort.")
sys.exit(-1)
def demult_event(self, event):
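        # Turn one parsed naxsi event into a list of dicts, one per
        # (zone, id, var_name) triple; fragments of multi-line NAXSI_FMT
        # events are buffered by seed until the last fragment arrives.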
demult = []
import copy
if event.get('seed_start') and event.get('seed_end') is None:
#First line of a multiline naxsi fmt
self.multiline_buf[event['seed_start']] = event
self.fragmented_lines += 1
return demult
elif event.get('seed_start') and event.get('seed_end'):
# naxsi fmt is very long, at least 3 lines
# print 'middle part of a multiline', event['seed_start'], event['seed_end']
self.fragmented_lines += 1
if self.multiline_buf.get(event['seed_end']) is None:
logging.critical('Got a line with seed_end {0} and seed_start {1}, but i cant find a matching seed_start...\nLine will probably be incomplete'.format(event['seed_end'], event['seed_start']))
return demult
self.multiline_buf[event['seed_end']].update(event)
self.multiline_buf[event['seed_start']] = self.multiline_buf[event['seed_end']]
del self.multiline_buf[event['seed_end']]
return demult
elif event.get('seed_start') is None and event.get('seed_end'):
# last line of the naxsi_fmt, just update the dict, and parse it like a normal line
if self.multiline_buf.get(event['seed_end']) is None:
logging.critical('Got a line with seed_end {0}, but i cant find a matching seed_start...\nLine will probably be incomplete'.format(event['seed_end']))
return demult
self.fragmented_lines += 1
self.reunited_lines += 1
self.multiline_buf[event['seed_end']].update(event)
event = self.multiline_buf[event['seed_end']]
del self.multiline_buf[event['seed_end']]
entry = {}
if not event.has_key('uri'):
entry['uri'] = ''
else:
entry['uri'] = event['uri']
if not event.has_key('server'):
entry['server'] = ''
else:
entry['server'] = event['server']
if not event.has_key('content'):
entry['content'] = ''
else:
entry['content'] = event['content']
if not event.has_key('ip'):
entry['ip'] = ''
else:
entry['ip'] = event['ip']
if not event.has_key('date'):
entry['date'] = ''
else:
entry['date'] = event['date']
entry['var_name'] = ''
clean = entry
# NAXSI_EXLOG lines only have one triple (zone,id,var_name), but has non-empty content
if 'zone' in event.keys():
if 'var_name' in event.keys():
entry['var_name'] = event['var_name']
entry['zone'] = event['zone']
entry['id'] = event['id']
demult.append(entry)
return demult
# NAXSI_FMT can have many (zone,id,var_name), but does not have content
# we iterate over triples.
elif 'zone0' in event.keys():
commit = True
for i in itertools.count():
entry = copy.deepcopy(clean)
zn = ''
vn = ''
rn = ''
if 'var_name' + str(i) in event.keys():
entry['var_name'] = event['var_name' + str(i)]
if 'zone' + str(i) in event.keys():
entry['zone'] = event['zone' + str(i)]
else:
commit = False
break
if 'id' + str(i) in event.keys():
entry['id'] = event['id' + str(i)]
else:
commit = False
break
if commit is True:
demult.append(entry)
else:
logging.warning("Malformed/incomplete event [missing subfield]")
logging.info(pprint.pformat(event))
return demult
return demult
else:
logging.warning("Malformed/incomplete event [no zone]")
logging.info(pprint.pformat(event))
return demult
def commit(self):
"""Process dicts of dict (yes) and push them to DB """
self.total_objs += len(self.dict_buf)
count = 0
for entry in self.dict_buf:
url_id = self.wrapper.insert(url = entry['uri'], table='urls')()
count += 1
exception_id = self.wrapper.insert(zone = entry['zone'], var_name = entry['var_name'], rule_id = entry['id'], content = entry['content']
, table = 'exceptions')()
self.wrapper.insert(peer_ip=entry['ip'], host = entry['server'], url_id=str(url_id), id_exception=str(exception_id),
date=str(entry['date']), table = 'connections')()
self.total_commits += count
# Real clearing of dict.
del self.dict_buf[0:len(self.dict_buf)]
def exception_to_dict(self, line):
"""Parses a naxsi exception to a dict,
        returns a dict of the parsed fields"""
odict = urlparse.parse_qs(line)
for x in odict.keys():
odict[x][0] = odict[x][0].replace('\n', "\\n")
odict[x][0] = odict[x][0].replace('\r', "\\r")
odict[x] = odict[x][0]
# check for incomplete/truncated lines
if 'zone0' in odict.keys():
for i in itertools.count():
is_z = is_id = False
if 'zone' + str(i) in odict.keys():
is_z = True
if 'id' + str(i) in odict.keys():
is_id = True
if is_z is True and is_id is True:
continue
if is_z is False and is_id is False:
break
# if is_z is True:
try:
del (odict['zone' + str(i)])
#if is_id is True:
del (odict['id' + str(i)])
del (odict['var_name' + str(i)])
except:
pass
break
return odict
def date_unify(self, date):
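        # Normalize the supported log date formats to "%Y-%m-%d %H:%M:%S",
        # e.g. (hypothetical) "2013/02/01 10:20:30" -> "2013-02-01 10:20:30".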
idx = 0
res = ""
ref_format = "%Y-%m-%d %H:%M:%S"
supported_formats = [
"%b %d %H:%M:%S",
"%b %d %H:%M:%S",
"%Y/%m/%d %H:%M:%S",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%dT%H:%M:%S"
# "%Y-%m-%dT%H:%M:%S+%:z"
]
while date[idx] == " " or date[idx] == "\t":
idx += 1
success = 0
for date_format in supported_formats:
nb_sp = date_format.count(" ")
clean_date = string.join(date.split(" ")[:nb_sp+1], " ")
# strptime does not support numeric time zone, hack.
idx = clean_date.find("+")
if idx != -1:
clean_date = clean_date[:idx]
try:
x = time.strptime(clean_date, date_format)
z = time.strftime(ref_format, x)
success = 1
break
except:
#print "'"+clean_date+"' not in format '"+date_format+"'"
pass
if success == 0:
logging.critical("Unable to parse date format :'"+date+"'")
sys.exit(-1)
return z
    # returns an array of [success, discarded, bad_line, fragmented, reunited] event counters
def acquire_nxline(self, line, date_format='%Y/%m/%d %H:%M:%S',
sod_marker=[' [error] ', ' [debug] '], eod_marker=[', client: ', '']):
success = 0
discard = 0
bad_line = 0
line = line.rstrip('\n')
for mark in sod_marker:
date_end = line.find(mark)
if date_end != -1:
break
for mark in eod_marker:
if mark == '':
data_end = len(line)
break
data_end = line.find(mark)
if data_end != -1:
break
        if date_end == -1 or data_end == -1:
bad_line += 1
return [success, discard, bad_line, self.fragmented_lines, self.reunited_lines]
date = self.date_unify(line[:date_end])
chunk = line[date_end:data_end]
md = None
for word in self.naxsi_keywords:
idx = chunk.find(word)
if (idx != -1):
md = self.exception_to_dict(chunk[idx+len(word):])
if md is None:
bad_line += 1
return [success, discard, bad_line, self.fragmented_lines, self.reunited_lines]
md['date'] = date
break
if md is None:
bad_line += 1
return [success, discard, bad_line, self.fragmented_lines, self.reunited_lines]
# if input filters on country were used, forced geoip XXX
for event in self.demult_event(md):
if self.filt_engine is None or self.filt_engine.dofilter(event) is True:
self.dict_buf.append(event)
success += 1
else:
discard += 1
return [success, discard, bad_line, self.fragmented_lines, self.reunited_lines]
| bsd-3-clause | -1,315,556,397,614,317,800 | 37.643678 | 206 | 0.473776 | false |
utkbansal/kuma | kuma/wiki/tests/test_tasks.py | 3 | 4056 | from __future__ import with_statement
import os
from django.conf import settings
from django.test import override_settings
import bitly_api
import mock
from nose.tools import eq_, ok_
from kuma.core.cache import memcache
from kuma.users.tests import UserTestCase, user
from . import revision, document
from ..tasks import (build_sitemaps, update_community_stats,
update_document_share_url)
from ..models import Document
class UpdateCommunityStatsTests(UserTestCase):
contributors = 10
def setUp(self):
super(UpdateCommunityStatsTests, self).setUp()
self.cache = memcache
def test_empty_community_stats(self):
update_community_stats()
stats = self.cache.get('community_stats')
self.assertIsNone(stats)
def test_populated_community_stats(self):
for i in range(self.contributors):
if i % 2 == 0:
locale = 'en-US'
else:
locale = 'pt-BR'
test_user = user(save=True)
doc = document(save=True, locale=locale)
revision(save=True, creator=test_user, document=doc)
update_community_stats()
stats = self.cache.get('community_stats')
self.assertIsNotNone(stats)
self.assertIn('contributors', stats)
self.assertIn('locales', stats)
self.assertIsInstance(stats['contributors'], long)
self.assertIsInstance(stats['locales'], long)
self.assertEqual(stats['contributors'], self.contributors)
self.assertEqual(stats['locales'], 2)
class SitemapsTestCase(UserTestCase):
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_sitemaps_files(self):
build_sitemaps()
locales = (Document.objects.filter_for_list()
.values_list('locale', flat=True))
expected_sitemap_locs = []
for locale in set(locales):
# we'll expect to see this locale in the sitemap index file
expected_sitemap_locs.append(
"<loc>https://example.com/sitemaps/%s/sitemap.xml</loc>" %
locale
)
sitemap_path = os.path.join(settings.MEDIA_ROOT, 'sitemaps',
locale, 'sitemap.xml')
with open(sitemap_path, 'r') as sitemap_file:
sitemap_xml = sitemap_file.read()
docs = Document.objects.filter_for_list(locale=locale)
for doc in docs:
ok_(doc.modified.strftime('%Y-%m-%d') in sitemap_xml)
ok_(doc.slug in sitemap_xml)
sitemap_path = os.path.join(settings.MEDIA_ROOT, 'sitemap.xml')
with open(sitemap_path, 'r') as sitemap_file:
index_xml = sitemap_file.read()
for loc in expected_sitemap_locs:
ok_(loc in index_xml)
@override_settings(BITLY_API_KEY='test', BITLY_USERNAME='test')
class BitlyTestCase(UserTestCase):
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
def setUp(self):
super(BitlyTestCase, self).setUp()
self.long_url = 'http://example.com/long-url'
self.short_url = 'http://bit.ly/short-url'
self.doc = Document.objects.get(pk=1)
@mock.patch('kuma.wiki.tasks.bitly')
def test_update_document_share_url(self, bitly):
bitly.shorten.return_value = {'url': self.short_url}
update_document_share_url(self.doc.pk)
eq_(Document.objects.get(pk=self.doc.pk).share_url, self.short_url)
@mock.patch('kuma.wiki.tasks.bitly')
def test_update_document_share_url_invalid(self, bitly):
bitly.shorten.return_value = {}
update_document_share_url(self.doc.pk)
eq_(self.doc.share_url, None)
@mock.patch('kuma.wiki.tasks.bitly')
def test_update_document_share_url_error(self, bitly):
bitly.shorten.side_effect = bitly_api.BitlyError('500', 'fail')
update_document_share_url(self.doc.pk)
eq_(self.doc.share_url, None)
| mpl-2.0 | -3,020,199,949,217,570,000 | 34.893805 | 75 | 0.623274 | false |
bpshetty/erpnext | erpnext/stock/doctype/batch/test_batch.py | 3 | 4238 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.exceptions import ValidationError
import unittest
from erpnext.stock.doctype.batch.batch import get_batch_qty, UnableToSelectBatchError
class TestBatch(unittest.TestCase):
def test_item_has_batch_enabled(self):
self.assertRaises(ValidationError, frappe.get_doc({
"doctype": "Batch",
"name": "_test Batch",
"item": "_Test Item"
}).save)
def make_batch_item(self):
from erpnext.stock.doctype.item.test_item import make_item
if not frappe.db.exists('ITEM-BATCH-1'):
make_item('ITEM-BATCH-1', dict(has_batch_no = 1, create_new_batch = 1))
def test_purchase_receipt(self, batch_qty = 100):
'''Test automated batch creation from Purchase Receipt'''
self.make_batch_item()
receipt = frappe.get_doc(dict(
doctype = 'Purchase Receipt',
supplier = '_Test Supplier',
items = [
dict(
item_code = 'ITEM-BATCH-1',
qty = batch_qty,
rate = 10
)
]
)).insert()
receipt.submit()
self.assertTrue(receipt.items[0].batch_no)
self.assertEquals(get_batch_qty(receipt.items[0].batch_no,
receipt.items[0].warehouse), batch_qty)
return receipt
def test_stock_entry_incoming(self):
'''Test batch creation via Stock Entry (Production Order)'''
self.make_batch_item()
stock_entry = frappe.get_doc(dict(
doctype = 'Stock Entry',
purpose = 'Material Receipt',
company = '_Test Company',
items = [
dict(
item_code = 'ITEM-BATCH-1',
qty = 90,
t_warehouse = '_Test Warehouse - _TC',
cost_center = 'Main - _TC',
rate = 10
)
]
)).insert()
stock_entry.submit()
self.assertTrue(stock_entry.items[0].batch_no)
self.assertEquals(get_batch_qty(stock_entry.items[0].batch_no, stock_entry.items[0].t_warehouse), 90)
def test_delivery_note(self):
'''Test automatic batch selection for outgoing items'''
batch_qty = 15
receipt = self.test_purchase_receipt(batch_qty)
delivery_note = frappe.get_doc(dict(
doctype = 'Delivery Note',
customer = '_Test Customer',
company = receipt.company,
items = [
dict(
item_code = 'ITEM-BATCH-1',
qty = batch_qty,
rate = 10,
warehouse = receipt.items[0].warehouse
)
]
)).insert()
delivery_note.submit()
# shipped with same batch
self.assertEquals(delivery_note.items[0].batch_no, receipt.items[0].batch_no)
# balance is 0
self.assertEquals(get_batch_qty(receipt.items[0].batch_no,
receipt.items[0].warehouse), 0)
def test_delivery_note_fail(self):
'''Test automatic batch selection for outgoing items'''
receipt = self.test_purchase_receipt(100)
delivery_note = frappe.get_doc(dict(
doctype = 'Delivery Note',
customer = '_Test Customer',
company = receipt.company,
items = [
dict(
item_code = 'ITEM-BATCH-1',
qty = 5000,
rate = 10,
warehouse = receipt.items[0].warehouse
)
]
))
self.assertRaises(UnableToSelectBatchError, delivery_note.insert)
def test_stock_entry_outgoing(self):
'''Test automatic batch selection for outgoing stock entry'''
batch_qty = 16
receipt = self.test_purchase_receipt(batch_qty)
stock_entry = frappe.get_doc(dict(
doctype = 'Stock Entry',
purpose = 'Material Issue',
company = receipt.company,
items = [
dict(
item_code = 'ITEM-BATCH-1',
qty = batch_qty,
s_warehouse = receipt.items[0].warehouse,
)
]
)).insert()
stock_entry.submit()
# assert same batch is selected
self.assertEqual(stock_entry.items[0].batch_no, receipt.items[0].batch_no)
# balance is 0
self.assertEquals(get_batch_qty(receipt.items[0].batch_no,
receipt.items[0].warehouse), 0)
def test_batch_split(self):
'''Test batch splitting'''
receipt = self.test_purchase_receipt()
from erpnext.stock.doctype.batch.batch import split_batch
new_batch = split_batch(receipt.items[0].batch_no, 'ITEM-BATCH-1', receipt.items[0].warehouse, 22)
self.assertEquals(get_batch_qty(receipt.items[0].batch_no, receipt.items[0].warehouse), 78)
self.assertEquals(get_batch_qty(new_batch, receipt.items[0].warehouse), 22)
| gpl-3.0 | 6,169,295,907,783,267,000 | 26.699346 | 103 | 0.676498 | false |
rpufky/trafficserver | tests/tools/traffic-replay/Config.py | 2 | 1044 | #!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SSL config
ca_certs = None
keyfile = None
# Proxy config
proxy_host = "127.0.0.1"
proxy_ssl_port = 443
proxy_nonssl_port = 8080
# process and thread config
nProcess = 4
nThread = 4
#colorize output
colorize = True
| apache-2.0 | 3,079,210,067,174,813,700 | 29.705882 | 75 | 0.743295 | false |
usc-isi/extra-specs | nova/tests/scheduler/test_chance_scheduler.py | 2 | 9861 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Chance Scheduler.
"""
import random
import mox
from nova import context
from nova import exception
from nova.scheduler import chance
from nova.scheduler import driver
from nova.tests.scheduler import test_scheduler
class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Chance Scheduler."""
driver_cls = chance.ChanceScheduler
def test_filter_hosts_avoid(self):
"""Test to make sure _filter_hosts() filters original hosts if
avoid_original_host is True."""
hosts = ['host1', 'host2', 'host3']
request_spec = dict(instance_properties=dict(host='host2'))
filter_properties = {'ignore_hosts': ['host2']}
filtered = self.driver._filter_hosts(request_spec, hosts,
filter_properties=filter_properties)
self.assertEqual(filtered, ['host1', 'host3'])
def test_filter_hosts_no_avoid(self):
"""Test to make sure _filter_hosts() does not filter original
hosts if avoid_original_host is False."""
hosts = ['host1', 'host2', 'host3']
request_spec = dict(instance_properties=dict(host='host2'))
filter_properties = {'ignore_hosts': []}
filtered = self.driver._filter_hosts(request_spec, hosts,
filter_properties=filter_properties)
self.assertEqual(filtered, hosts)
def test_basic_schedule_run_instance(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'num_instances': 2,
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
# create_instance_db_entry() usually does this, but we're
# stubbing it.
def _add_uuid1(ctxt, request_spec):
request_spec['instance_properties']['uuid'] = 'fake-uuid1'
def _add_uuid2(ctxt, request_spec):
request_spec['instance_properties']['uuid'] = 'fake-uuid2'
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
self.mox.StubOutWithMock(random, 'random')
self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
self.mox.StubOutWithMock(driver, 'encode_instance')
ctxt.elevated().AndReturn(ctxt_elevated)
# instance 1
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.5)
self.driver.create_instance_db_entry(ctxt,
request_spec).WithSideEffects(_add_uuid1).AndReturn(
instance1)
driver.cast_to_compute_host(ctxt, 'host3', 'run_instance',
instance_uuid=instance1['uuid'], **fake_kwargs)
driver.encode_instance(instance1).AndReturn(instance1_encoded)
# instance 2
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.2)
self.driver.create_instance_db_entry(ctxt,
request_spec).WithSideEffects(_add_uuid2).AndReturn(
instance2)
driver.cast_to_compute_host(ctxt, 'host1', 'run_instance',
instance_uuid=instance2['uuid'], **fake_kwargs)
driver.encode_instance(instance2).AndReturn(instance2_encoded)
self.mox.ReplayAll()
result = self.driver.schedule_run_instance(ctxt, request_spec,
*fake_args, **fake_kwargs)
expected = [instance1_encoded, instance2_encoded]
self.assertEqual(result, expected)
def test_scheduler_includes_launch_index(self):
ctxt = "fake-context"
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'num_instances': 2,
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
# create_instance_db_entry() usually does this, but we're
# stubbing it.
def _add_uuid(num):
"""Return a function that adds the provided uuid number."""
def _add_uuid_num(_, spec):
spec['instance_properties']['uuid'] = 'fake-uuid%d' % num
return _add_uuid_num
def _has_launch_index(expected_index):
"""Return a function that verifies the expected index."""
def _check_launch_index(value):
if 'instance_properties' in value:
if 'launch_index' in value['instance_properties']:
index = value['instance_properties']['launch_index']
if index == expected_index:
return True
return False
return _check_launch_index
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
self.mox.StubOutWithMock(driver, 'encode_instance')
# instance 1
self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host')
self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(0))
).WithSideEffects(_add_uuid(1)).AndReturn(instance1)
driver.cast_to_compute_host(ctxt, 'host', 'run_instance',
instance_uuid=instance1['uuid'])
driver.encode_instance(instance1).AndReturn(instance1)
# instance 2
self.driver._schedule(ctxt, 'compute', request_spec).AndReturn('host')
self.driver.create_instance_db_entry(
ctxt, mox.Func(_has_launch_index(1))
).WithSideEffects(_add_uuid(2)).AndReturn(instance2)
driver.cast_to_compute_host(ctxt, 'host', 'run_instance',
instance_uuid=instance2['uuid'])
driver.encode_instance(instance2).AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(ctxt, request_spec)
def test_basic_schedule_run_instance_no_hosts(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
instance_opts = 'fake_instance_opts'
request_spec = {'num_instances': 2,
'instance_properties': instance_opts}
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
# instance 1
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost,
self.driver.schedule_run_instance, ctxt, request_spec,
*fake_args, **fake_kwargs)
def test_basic_schedule_fallback(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
topic = 'fake_topic'
method = 'fake_method'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
self.mox.StubOutWithMock(random, 'random')
self.mox.StubOutWithMock(driver, 'cast_to_host')
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, topic).AndReturn(
['host1', 'host2', 'host3', 'host4'])
random.random().AndReturn(.5)
driver.cast_to_host(ctxt, topic, 'host3', method, **fake_kwargs)
self.mox.ReplayAll()
self.driver.schedule(ctxt, topic, method, *fake_args, **fake_kwargs)
def test_basic_schedule_fallback_no_hosts(self):
ctxt = context.RequestContext('fake', 'fake', False)
ctxt_elevated = 'fake-context-elevated'
topic = 'fake_topic'
method = 'fake_method'
fake_args = (1, 2, 3)
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
self.mox.StubOutWithMock(ctxt, 'elevated')
self.mox.StubOutWithMock(self.driver, 'hosts_up')
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, topic).AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost,
self.driver.schedule, ctxt, topic, method,
*fake_args, **fake_kwargs)
| apache-2.0 | -1,759,562,343,065,463,300 | 41.321888 | 78 | 0.610486 | false |
tquilian/exeNext | exe/engine/quiztestidevice.py | 10 | 16960 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
A QuizTest Idevice is one built up from TestQuestions
"""
import logging
from exe.engine.persist import Persistable
from exe.engine.idevice import Idevice
from exe.engine.translate import lateTranslate
from exe.engine.field import TextAreaField
import re
log = logging.getLogger(__name__)
# ===========================================================================
class AnswerOption(Persistable):
"""
A TestQuestion is built up of question and AnswerOptions. Each
answerOption can be rendered as an XHTML element
"""
def __init__(self, question, idevice, answer="", isCorrect=False):
"""
Initialize
"""
self.question = question
self.idevice = idevice
self.answerTextArea = TextAreaField(x_(u'Option'),
self.question._optionInstruc,
answer)
self.answerTextArea.idevice = idevice
self.isCorrect = isCorrect
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
        # be warned that before upgrading, this iDevice field might not exist:
if hasattr(self, 'answerTextArea')\
and hasattr(self.answerTextArea, 'images'):
for this_image in self.answerTextArea.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.answerTextArea
return None
def getRichTextFields(self):
"""
Like getResourcesField(), a general helper to allow nodes to search
through all of their fields without having to know the specifics of each
iDevice type.
"""
fields_list = []
if hasattr(self, 'answerTextArea'):
fields_list.append(self.answerTextArea)
return fields_list
def upgrade_setIdevice(self, idevice, question):
"""
While some of this might typically be done in an automatic upgrade
method called from in increased persistence version, the problem
with that approach is that the idevice was not previously stored,
and cannot easily be gotten at that stage of operation.
Rather than making such an upgrade method more messy than necessary,
this method allows the parent TestQuestion to merely set
itself on each of its AnswerOptions during its own upgrade.
Helps upgrade to somewhere before version 0.25 (post-v0.24),
taking the old unicode string fields,
        and converting them into image-enabled TextAreaFields:
"""
self.idevice = idevice
self.question = question
self.answerTextArea = TextAreaField(x_(u'Option'),
self.question._optionInstruc,
self.answer)
self.answerTextArea.idevice = self.idevice
# ===========================================================================
class TestQuestion(Persistable):
"""
A TestQuestion is built up of question and AnswerOptions.
"""
persistenceVersion = 3
def __init__(self, idevice, question=""):
"""
Initialize
"""
self.idevice = idevice
self.options = []
self.correctAns = -2
self.userAns = -1
self._questionInstruc = x_(u"""Enter the question stem.
        The question should be clear and unambiguous. Avoid negative premises
as these can tend to be ambiguous.""")
self._optionInstruc = x_(u"""Enter an answer option. Provide
a range of plausible distractors (usually 3-4) as well as the correct answer.
Click on the <Add another option> button to add another answer.""")
self._correctAnswerInstruc = x_(u"""To indicate the correct answer,
click the radio button next to the correct option.""")
self.questionTextArea = TextAreaField(x_(u'Question:'),
self._questionInstruc, u'')
self.questionTextArea.idevice = self.idevice
self.addOption()
# Properties
questionInstruc = lateTranslate('questionInstruc')
optionInstruc = lateTranslate('optionInstruc')
correctAnswerInstruc = lateTranslate('correctAnswerInstruc')
def addOption(self):
"""
Add a new option to this question.
"""
self.options.append(AnswerOption(self, self.idevice))
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
        # be warned that before upgrading, this iDevice field might not exist:
if hasattr(self, 'questionTextArea')\
and hasattr(self.questionTextArea, 'images'):
for this_image in self.questionTextArea.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.questionTextArea
for this_option in self.options:
this_field = this_option.getResourcesField(this_resource)
if this_field is not None:
return this_field
return None
def getRichTextFields(self):
"""
Like getResourcesField(), a general helper to allow nodes to search
through all of their fields without having to know the specifics of each
iDevice type.
"""
fields_list = []
if hasattr(self, 'questionTextArea'):
fields_list.append(self.questionTextArea)
for this_option in self.options:
fields_list.extend(this_option.getRichTextFields())
return fields_list
def upgradeToVersion1(self):
"""
Upgrades to v 0.13
"""
self._optionInstruc = x_(u"""Enter an answer option. Provide
a range of plausible distractors (usually 3-4) as well as the correct answer.
Click on the <Add another option> button to add another answer.""")
def upgradeToVersion2(self):
"""
Upgrades to v 0.13
"""
self._questionInstruc= x_(u"""Enter the question stem.
        The question should be clear and unambiguous. Avoid negative premises
as these can tend to be ambiguous.""")
def upgradeToVersion3(self):
"""
Upgrades to v 0.13
"""
self._correctAnswerInstruc = x_(u"""To indicate the correct answer,
click the radio button next to the correct option.""")
def upgrade_setIdevice(self, idevice):
"""
While some of this might typically be done in an automatic upgrade
method called from in increased persistence version, the problem
with that approach is that the idevice was not previously stored,
and cannot easily be gotten at that stage of operation.
Rather than making such an upgrade method more messy than necessary,
this method allows the parent TestQuestionIdevice to merely set
itself on each of its TestQuestions during its own upgrade.
Helps upgrade to somewhere before version 0.25 (post-v0.24),
taking the old unicode string fields,
        and converting them into image-enabled TextAreaFields:
"""
self.idevice = idevice
self.questionTextArea = TextAreaField(x_(u'Question:'),
self._questionInstruc,
self.question)
self.questionTextArea.idevice = self.idevice
# and then, need to propagate the same upgrades
# down through each of the options:
for option in self.options:
option.upgrade_setIdevice(self.idevice, self)
# ===========================================================================
class QuizTestIdevice(Idevice):
"""
A QuizTestIdevice Idevice is one built up from question and options
"""
persistenceVersion = 10
def __init__(self):
"""
Initialize
"""
Idevice.__init__(self,
x_(u"SCORM Quiz"),
x_(u"University of Auckland"),
x_(u"""Unlike the MCQ the SCORM quiz is used to test
the learner's knowledge on a topic without providing the learner with feedback
to the correct answer. The quiz will often be given once the learner has had
time to learn and practice using the information or skill.
"""), u"", "question")
self.isQuiz = True
self.emphasis = Idevice.SomeEmphasis
self.score = -1
self.isAnswered = True
self.passRate = "50"
self.questions = []
self.addQuestion()
self.systemResources += ["common.js"]
def addQuestion(self):
"""
Add a new question to this iDevice.
"""
self.questions.append(TestQuestion(self))
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
for this_question in self.questions:
this_field = this_question.getResourcesField(this_resource)
if this_field is not None:
return this_field
return None
def getRichTextFields(self):
"""
Like getResourcesField(), a general helper to allow nodes to search
through all of their fields without having to know the specifics of each
iDevice type.
"""
fields_list = []
for this_question in self.questions:
fields_list.extend(this_question.getRichTextFields())
return fields_list
def burstHTML(self, i):
"""
takes a BeautifulSoup fragment (i) and bursts its contents to
import this idevice from a CommonCartridge export
"""
# SCORM QuizTest Idevice:
title = i.find(name='h2', attrs={'class' : 'iDeviceTitle' })
self.title = title.renderContents().decode('utf-8')
inner = i.find(name='div', attrs={'class' : 'iDevice_inner' })
passrate = inner.find(name='div', attrs={'class' : 'passrate' })
self.passRate = passrate.attrMap['value'].decode('utf-8')
# copied and modified from Multi-Select:
sc_questions = inner.findAll(name='div', attrs={'class' : 'question'})
if len(sc_questions) < 1:
# need to remove the default 1st question
del self.questions[0]
for question_num in range(len(sc_questions)):
if question_num > 0:
# only created with the first question, add others:
self.addQuestion()
question = sc_questions[question_num]
questions = question.findAll(name='div', attrs={'class' : 'block',
'id' : re.compile('^taquestion') })
if len(questions) == 1:
# ELSE: should warn of unexpected result!
inner_question = questions[0]
self.questions[question_num].questionTextArea.content_wo_resourcePaths \
= inner_question.renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.questions[question_num].questionTextArea.content_w_resourcePaths \
= self.questions[question_num].questionTextArea.MassageResourceDirsIntoContent( \
self.questions[question_num].questionTextArea.content_wo_resourcePaths)
self.questions[question_num].questionTextArea.content \
= self.questions[question_num].questionTextArea.content_w_resourcePaths
options = question.findAll(name='div', attrs={'class' : 'block',
'id' : re.compile('^taoptionAnswer') })
answers = question.findAll(name='input', attrs={'type' : 'radio'})
if len(options) < 1:
# need to remove the default 1st option
del self.questions[question_num].options[0]
for option_loop in range(0, len(options)):
if option_loop >= 1:
# more options than created by default:
self.questions[question_num].addOption()
self.questions[question_num].options[option_loop].answerTextArea.content_wo_resourcePaths \
= options[option_loop].renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.questions[question_num].options[option_loop].answerTextArea.content_w_resourcePaths \
= self.questions[question_num].options[option_loop].answerTextArea.MassageResourceDirsIntoContent( \
self.questions[question_num].options[option_loop].answerTextArea.content_wo_resourcePaths)
self.questions[question_num].options[option_loop].answerTextArea.content \
= self.questions[question_num].options[option_loop].answerTextArea.content_w_resourcePaths
# and finally, see if this is a correct answer:
this_answer = answers[option_loop].attrMap['value']
if this_answer == "0":
# then this option is correct:
self.questions[question_num].options[option_loop].isCorrect\
= True
# and SCORM quiz also has an overall correctAnswer;
# since it only allows one answer, this must be it:
self.questions[question_num].correctAns = option_loop
def upgradeToVersion2(self):
"""
Upgrades the node from 1 (v0.5) to 2 (v0.6).
Old packages will loose their icons, but they will load.
"""
log.debug(u"Upgrading iDevice")
self.emphasis = Idevice.SomeEmphasis
def upgradeToVersion3(self):
"""
        Upgrades the node from 2 (v0.6) to 3 (v0.7).
Change icon from 'multichoice' to 'question'
"""
log.debug(u"Upgrading iDevice icon")
self.icon = "question"
def upgradeToVersion4(self):
"""
Upgrades v0.6 to v0.7.
"""
self.lastIdevice = False
def upgradeToVersion5(self):
"""
Upgrades to exe v0.10
"""
self._upgradeIdeviceToVersion1()
def upgradeToVersion6(self):
"""
Upgrades to v0.12
"""
self._upgradeIdeviceToVersion2()
self.systemResources += ["common.js", "libot_drag.js"]
def upgradeToVersion7(self):
"""
Upgrades to v0.14
"""
# Note: the following routine doesn't appear to exist anymore,
# so now that the persistence version is finally upgrading to 7,
        # (and then, actually on to 8) this no longer works, go figure!
#####
#self._upgradeIdeviceToVersion3()
####
self.isQuiz = True
def upgradeToVersion8(self):
"""
Upgrades to somewhere before version 0.25 (post-v0.24)
Taking the TestQuestions' old unicode string fields,
        and converting them into image-enabled TextAreaFields:
"""
for question in self.questions:
question.upgrade_setIdevice(self)
def upgradeToVersion9(self):
if "libot_drag.js" in self.systemResources:
self.systemResources.remove("libot_drag.js")
def upgradeToVersion10(self):
"""
Delete icon from system resources
"""
self._upgradeIdeviceToVersion3()
## ===========================================================================
#def register(ideviceStore):
#"""Register with the ideviceStore"""
#ideviceStore.extended.append(QuizTestIdevice())
| gpl-2.0 | 377,215,837,858,042,940 | 37.721461 | 124 | 0.591627 | false |
murrown/cyder | cyder/cydhcp/network/forms.py | 2 | 4382 | from django import forms
from django.core.exceptions import ValidationError
import ipaddr
from cyder.base.constants import IP_TYPES, IP_TYPE_4, IP_TYPE_6
from cyder.base.eav.forms import get_eav_form
from cyder.base.mixins import UsabilityFormMixin
from cyder.cydhcp.network.models import Network, NetworkAV
from cyder.cydhcp.site.models import Site
from cyder.cydhcp.vlan.models import Vlan
from cyder.cydns.ip.models import ipv6_to_longs
class NetworkForm(forms.ModelForm, UsabilityFormMixin):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
empty_label="(Defaults to parent's site.)",
required=False,
help_text="The site the network will be put into. "
"Defaults to parent network's site"
)
def __init__(self, *args, **kwargs):
super(NetworkForm, self).__init__(*args, **kwargs)
self.fields['dhcpd_raw_include'].label = "DHCP Config Extras"
self.fields['dhcpd_raw_include'].widget.attrs.update(
{'cols': '80',
'style':
'display: none; width: 680px;'})
class Meta:
model = Network
exclude = ('ip_upper', 'ip_lower', 'prefixlen')
widgets = {'ip_type': forms.RadioSelect}
def clean(self):
cleaned_data = super(NetworkForm, self).clean()
network_str = cleaned_data.get('network_str', '')
try:
ip_type = cleaned_data.get('ip_type')
if ip_type not in IP_TYPES:
raise ValidationError("IP type must be either IPv4 or IPv6.")
if ip_type == IP_TYPE_4:
network = ipaddr.IPv4Network(network_str)
ip_upper, ip_lower = 0, int(network.network)
elif ip_type == IP_TYPE_6:
network = ipaddr.IPv6Network(network_str)
ip_upper, ip_lower = ipv6_to_longs(network.network)
except ipaddr.AddressValueError, e:
raise ValidationError("Bad IP address {0}".format(e))
except ipaddr.NetmaskValueError, e:
raise ValidationError("Bad netmask {0}".format(e))
return cleaned_data
NetworkAVForm = get_eav_form(NetworkAV, Network)
class NetworkForm_network(forms.Form):
network = forms.CharField(
required=True,
help_text='Enter the address and mask in '
'CIDR notation (e.g. 10.0.0.0/24)')
ip_type = forms.ChoiceField(choices=IP_TYPES.items())
def clean(self):
cleaned_data = super(NetworkForm_network, self).clean()
network_str = cleaned_data.get('network', '')
try:
ip_type = cleaned_data.get('ip_type')
if ip_type not in IP_TYPES:
raise ValidationError("IP type must be either IPv4 or IPv6.")
elif ip_type == IP_TYPE_4:
network = ipaddr.IPv4Network(network_str)
ip_upper, ip_lower = 0, int(network.network)
            elif ip_type == IP_TYPE_6:
network = ipaddr.IPv6Network(network_str)
ip_upper, ip_lower = ipv6_to_longs(network.network)
except ipaddr.AddressValueError, e:
raise ValidationError("Bad IP address {0}".format(e))
except ipaddr.NetmaskValueError, e:
raise ValidationError("Bad netmask {0}".format(e))
if (Network.objects.filter(ip_upper=ip_upper,
ip_lower=ip_lower).exists()):
raise ValidationError("This network has already been allocated.")
        # TODO add parent calculations
return cleaned_data
class NetworkForm_site(forms.Form):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=True
)
def clean(self):
cleaned_data = super(NetworkForm_site, self).clean()
site = cleaned_data.get('site', None)
if not site:
raise ValidationError("That site does not exist")
return cleaned_data
class NetworkForm_vlan(forms.Form):
vlan = forms.ModelChoiceField(
queryset=Vlan.objects.all(),
required=True,
)
name = forms.CharField()
number = forms.IntegerField()
create_choice = forms.ChoiceField(
widget=forms.RadioSelect, initial='e', choices=(
('existing', 'Use existing VLAN template'),
('new', 'Create new VLAN'),
('none', "Don't assign a VLAN"),
))
| bsd-3-clause | -1,959,771,949,257,195,500 | 36.135593 | 77 | 0.607485 | false |
Pakoach/Sick-Beard | lib/tvdb_api/tests/test_tvdb_api.py | 26 | 15937 | #!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""Unittests for tvdb_api
"""
import os
import sys
import datetime
import unittest
# Force parent directory onto path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import tvdb_api
import tvdb_ui
from tvdb_api import (tvdb_shownotfound, tvdb_seasonnotfound,
tvdb_episodenotfound, tvdb_attributenotfound)
class test_tvdb_basic(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
def test_different_case(self):
"""Checks the auto-correction of show names is working.
It should correct the weirdly capitalised 'sCruBs' to 'Scrubs'
"""
self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')
def test_spaces(self):
"""Checks shownames with spaces
"""
self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')
def test_numeric(self):
"""Checks numeric show names
"""
self.assertEquals(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.')
self.assertEquals(self.t['24']['seriesname'], '24')
def test_show_iter(self):
"""Iterating over a show returns each seasons
"""
self.assertEquals(
len(
[season for season in self.t['Life on Mars']]
),
2
)
def test_season_iter(self):
"""Iterating over a show returns episodes
"""
self.assertEquals(
len(
[episode for episode in self.t['Life on Mars'][1]]
),
8
)
def test_get_episode_overview(self):
"""Checks episode overview is retrieved correctly.
"""
self.assertEquals(
self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(
'When a new copy of Doral, a Cylon who had been previously'),
True
)
def test_get_parent(self):
"""Check accessing series from episode instance
"""
show = self.t['Battlestar Galactica (2003)']
season = show[1]
episode = show[1][1]
self.assertEquals(
season.show,
show
)
self.assertEquals(
episode.season,
season
)
self.assertEquals(
episode.season.show,
show
)
class test_tvdb_errors(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
def test_seasonnotfound(self):
"""Checks exception is thrown when season doesn't exist.
"""
self.assertRaises(tvdb_seasonnotfound, lambda:self.t['CNNNN'][10][1])
def test_shownotfound(self):
"""Checks exception is thrown when episode doesn't exist.
"""
self.assertRaises(tvdb_shownotfound, lambda:self.t['the fake show thingy'])
def test_episodenotfound(self):
"""Checks exception is raised for non-existent episode
"""
self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30])
def test_attributenamenotfound(self):
"""Checks exception is thrown for if an attribute isn't found.
"""
self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])
self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])
class test_tvdb_search(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
def test_search_len(self):
"""There should be only one result matching
"""
self.assertEquals(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1)
def test_search_checkname(self):
"""Checks you can get the episode name of a search result
"""
self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')
self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')
def test_search_multiresults(self):
"""Checks search can return multiple results
"""
self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)
def test_search_no_params_error(self):
"""Checks not supplying search info raises TypeError"""
self.assertRaises(
TypeError,
lambda: self.t['Scrubs'].search()
)
def test_search_season(self):
"""Checks the searching of a single season"""
self.assertEquals(
len(self.t['Scrubs'][1].search("First")),
3
)
def test_search_show(self):
"""Checks the searching of an entire show"""
self.assertEquals(
len(self.t['CNNNN'].search('CNNNN', key='episodename')),
3
)
def test_aired_on(self):
"""Tests airedOn show method"""
sr = self.t['Scrubs'].airedOn(datetime.date(2001, 10, 2))
self.assertEquals(len(sr), 1)
self.assertEquals(sr[0]['episodename'], u'My First Day')
class test_tvdb_data(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
def test_episode_data(self):
"""Check the firstaired value is retrieved
"""
self.assertEquals(
self.t['lost']['firstaired'],
'2004-09-22'
)
class test_tvdb_misc(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
def test_repr_show(self):
"""Check repr() of Season
"""
self.assertEquals(
repr(self.t['CNNNN']),
"<Show Chaser Non-Stop News Network (CNNNN) (containing 3 seasons)>"
)
def test_repr_season(self):
"""Check repr() of Season
"""
self.assertEquals(
repr(self.t['CNNNN'][1]),
"<Season instance (containing 9 episodes)>"
)
def test_repr_episode(self):
"""Check repr() of Episode
"""
self.assertEquals(
repr(self.t['CNNNN'][1][1]),
"<Episode 01x01 - Terror Alert>"
)
def test_have_all_languages(self):
"""Check valid_languages is up-to-date (compared to languages.xml)
"""
et = self.t._getetsrc(
"http://thetvdb.com/api/%s/languages.xml" % (
self.t.config['apikey']
)
)
languages = [x.find("abbreviation").text for x in et.findall("Language")]
self.assertEquals(
sorted(languages),
sorted(self.t.config['valid_languages'])
)
class test_tvdb_languages(unittest.TestCase):
def test_episode_name_french(self):
"""Check episode data is in French (language="fr")
"""
t = tvdb_api.Tvdb(cache = True, language = "fr")
self.assertEquals(
t['scrubs'][1][1]['episodename'],
"Mon premier jour"
)
self.assertTrue(
t['scrubs']['overview'].startswith(
u"J.D. est un jeune m\xe9decin qui d\xe9bute"
)
)
def test_episode_name_spanish(self):
"""Check episode data is in Spanish (language="es")
"""
t = tvdb_api.Tvdb(cache = True, language = "es")
self.assertEquals(
t['scrubs'][1][1]['episodename'],
"Mi Primer Dia"
)
self.assertTrue(
t['scrubs']['overview'].startswith(
u'Scrubs es una divertida comedia'
)
)
def test_multilanguage_selection(self):
"""Check selected language is used
"""
class SelectEnglishUI(tvdb_ui.BaseUI):
def selectSeries(self, allSeries):
return [x for x in allSeries if x['language'] == "en"][0]
class SelectItalianUI(tvdb_ui.BaseUI):
def selectSeries(self, allSeries):
return [x for x in allSeries if x['language'] == "it"][0]
t_en = tvdb_api.Tvdb(
cache=True,
custom_ui = SelectEnglishUI,
language = "en")
t_it = tvdb_api.Tvdb(
cache=True,
custom_ui = SelectItalianUI,
language = "it")
self.assertEquals(
t_en['dexter'][1][2]['episodename'], "Crocodile"
)
self.assertEquals(
t_it['dexter'][1][2]['episodename'], "Lacrime di coccodrillo"
)
class test_tvdb_unicode(unittest.TestCase):
def test_search_in_chinese(self):
"""Check searching for show with language=zh returns Chinese seriesname
"""
t = tvdb_api.Tvdb(cache = True, language = "zh")
show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
self.assertEquals(
type(show),
tvdb_api.Show
)
self.assertEquals(
show['seriesname'],
u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'
)
def test_search_in_all_languages(self):
"""Check search_all_languages returns Chinese show, with language=en
"""
t = tvdb_api.Tvdb(cache = True, search_all_languages = True, language="en")
show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
self.assertEquals(
type(show),
tvdb_api.Show
)
self.assertEquals(
show['seriesname'],
u'Virtues Of Harmony II'
)
class test_tvdb_banners(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = True)
def test_have_banners(self):
"""Check banners at least one banner is found
"""
self.assertEquals(
len(self.t['scrubs']['_banners']) > 0,
True
)
def test_banner_url(self):
"""Checks banner URLs start with http://
"""
for banner_type, banner_data in self.t['scrubs']['_banners'].items():
for res, res_data in banner_data.items():
for bid, banner_info in res_data.items():
self.assertEquals(
banner_info['_bannerpath'].startswith("http://"),
True
)
def test_episode_image(self):
"""Checks episode 'filename' image is fully qualified URL
"""
self.assertEquals(
self.t['scrubs'][1][1]['filename'].startswith("http://"),
True
)
def test_show_artwork(self):
"""Checks various image URLs within season data are fully qualified
"""
for key in ['banner', 'fanart', 'poster']:
self.assertEquals(
self.t['scrubs'][key].startswith("http://"),
True
)
class test_tvdb_actors(unittest.TestCase):
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)
def test_actors_is_correct_datatype(self):
"""Check show/_actors key exists and is correct type"""
self.assertTrue(
isinstance(
self.t['scrubs']['_actors'],
tvdb_api.Actors
)
)
def test_actors_has_actor(self):
"""Check show has at least one Actor
"""
self.assertTrue(
isinstance(
self.t['scrubs']['_actors'][0],
tvdb_api.Actor
)
)
def test_actor_has_name(self):
"""Check first actor has a name"""
self.assertEquals(
self.t['scrubs']['_actors'][0]['name'],
"Zach Braff"
)
def test_actor_image_corrected(self):
"""Check image URL is fully qualified
"""
for actor in self.t['scrubs']['_actors']:
if actor['image'] is not None:
# Actor's image can be None, it displays as the placeholder
# image on thetvdb.com
self.assertTrue(
actor['image'].startswith("http://")
)
class test_tvdb_doctest(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)
def test_doctest(self):
"""Check docstring examples works"""
import doctest
doctest.testmod(tvdb_api)
class test_tvdb_custom_caching(unittest.TestCase):
def test_true_false_string(self):
"""Tests setting cache to True/False/string
Basic tests, only checking for errors
"""
tvdb_api.Tvdb(cache = True)
tvdb_api.Tvdb(cache = False)
tvdb_api.Tvdb(cache = "/tmp")
def test_invalid_cache_option(self):
"""Tests setting cache to invalid value
"""
try:
tvdb_api.Tvdb(cache = 2.3)
except ValueError:
pass
else:
self.fail("Expected ValueError from setting cache to float")
def test_custom_urlopener(self):
class UsedCustomOpener(Exception):
pass
import urllib2
class TestOpener(urllib2.BaseHandler):
def default_open(self, request):
print request.get_method()
raise UsedCustomOpener("Something")
custom_opener = urllib2.build_opener(TestOpener())
t = tvdb_api.Tvdb(cache = custom_opener)
try:
t['scrubs']
except UsedCustomOpener:
pass
else:
self.fail("Did not use custom opener")
class test_tvdb_by_id(unittest.TestCase):
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)
def test_actors_is_correct_datatype(self):
"""Check show/_actors key exists and is correct type"""
self.assertEquals(
self.t[76156]['seriesname'],
'Scrubs'
)
class test_tvdb_zip(unittest.TestCase):
# Used to store the cached instance of Tvdb()
t = None
def setUp(self):
if self.t is None:
self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True)
def test_get_series_from_zip(self):
"""
"""
self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')
def test_spaces_from_zip(self):
"""Checks shownames with spaces
"""
self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity = 2)
unittest.main(testRunner = runner)
| gpl-3.0 | 8,590,117,882,473,875,000 | 30.24902 | 123 | 0.554935 | false |
orion/swift-config | swift/common/ring/builder.py | 4 | 45916 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import itertools
import math
import random
import cPickle as pickle
from array import array
from collections import defaultdict
from time import time
from swift.common import exceptions
from swift.common.ring import RingData
from swift.common.ring.utils import tiers_for_dev, build_tier_tree
MAX_BALANCE = 999.99
class RingBuilder(object):
"""
Used to build swift.common.ring.RingData instances to be written to disk
and used with swift.common.ring.Ring instances. See bin/swift-ring-builder
for example usage.
The instance variable devs_changed indicates if the device information has
changed since the last balancing. This can be used by tools to know whether
a rebalance request is an isolated request or due to added, changed, or
removed devices.
:param part_power: number of partitions = 2**part_power.
:param replicas: number of replicas for each partition
:param min_part_hours: minimum number of hours between partition changes
"""
def __init__(self, part_power, replicas, min_part_hours):
if part_power > 32:
raise ValueError("part_power must be at most 32 (was %d)"
% (part_power,))
if replicas < 1:
raise ValueError("replicas must be at least 1 (was %.6f)"
% (replicas,))
if min_part_hours < 0:
raise ValueError("min_part_hours must be non-negative (was %d)"
% (min_part_hours,))
self.part_power = part_power
self.replicas = replicas
self.min_part_hours = min_part_hours
self.parts = 2 ** self.part_power
self.devs = []
self.devs_changed = False
self.version = 0
# _replica2part2dev maps from replica number to partition number to
# device id. So, for a three replica, 2**23 ring, it's an array of
# three 2**23 arrays of device ids (unsigned shorts). This can work a
# bit faster than the 2**23 array of triplet arrays of device ids in
# many circumstances. Making one big 2**23 * 3 array didn't seem to
# have any speed change; though you're welcome to try it again (it was
# a while ago, code-wise, when I last tried it).
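        # As a purely illustrative example, a tiny ring with part_power=2
        # (4 partitions) and 3 replicas might hold something like:
        #   [array('H', [0, 1, 2, 3]),
        #    array('H', [1, 2, 3, 0]),
        #    array('H', [2, 3, 0, 1])]
        # so replica r of partition p lives on device id
        # self._replica2part2dev[r][p].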
self._replica2part2dev = None
# _last_part_moves is a 2**23 array of unsigned bytes representing the
# number of hours since a given partition was last moved. This is used
# to guarantee we don't move a partition twice within a given number of
# hours (24 is my usual test). Removing a device or setting its weight
# to 0 overrides this behavior as it's assumed those actions are done
# because of device failure.
# _last_part_moves_epoch indicates the time the offsets in
# _last_part_moves is based on.
self._last_part_moves_epoch = None
self._last_part_moves = None
self._last_part_gather_start = 0
self._remove_devs = []
self._ring = None
def weight_of_one_part(self):
"""
Returns the weight of each partition as calculated from the
total weight of all the devices.
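        For example (illustrative numbers only): a ring with 1024 partitions,
        3 replicas and a total device weight of 300.0 yields
        1024 * 3 / 300.0 = 10.24 partition-replicas per unit of weight.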
"""
try:
return self.parts * self.replicas / \
sum(d['weight'] for d in self._iter_devs())
except ZeroDivisionError:
raise exceptions.EmptyRingError('There are no devices in this '
'ring, or all devices have been '
'deleted')
def copy_from(self, builder):
"""
Reinitializes this RingBuilder instance from data obtained from the
builder dict given. Code example::
b = RingBuilder(1, 1, 1) # Dummy values
b.copy_from(builder)
This is to restore a RingBuilder that has had its b.to_dict()
previously saved.
"""
if hasattr(builder, 'devs'):
self.part_power = builder.part_power
self.replicas = builder.replicas
self.min_part_hours = builder.min_part_hours
self.parts = builder.parts
self.devs = builder.devs
self.devs_changed = builder.devs_changed
self.version = builder.version
self._replica2part2dev = builder._replica2part2dev
self._last_part_moves_epoch = builder._last_part_moves_epoch
self._last_part_moves = builder._last_part_moves
self._last_part_gather_start = builder._last_part_gather_start
self._remove_devs = builder._remove_devs
else:
self.part_power = builder['part_power']
self.replicas = builder['replicas']
self.min_part_hours = builder['min_part_hours']
self.parts = builder['parts']
self.devs = builder['devs']
self.devs_changed = builder['devs_changed']
self.version = builder['version']
self._replica2part2dev = builder['_replica2part2dev']
self._last_part_moves_epoch = builder['_last_part_moves_epoch']
self._last_part_moves = builder['_last_part_moves']
self._last_part_gather_start = builder['_last_part_gather_start']
self._remove_devs = builder['_remove_devs']
self._ring = None
# Old builders may not have a region defined for their devices, in
# which case we default it to 1.
for dev in self._iter_devs():
dev.setdefault("region", 1)
def to_dict(self):
"""
Returns a dict that can be used later with copy_from to
restore a RingBuilder. swift-ring-builder uses this to
pickle.dump the dict to a file and later load that dict into
copy_from.
"""
return {'part_power': self.part_power,
'replicas': self.replicas,
'min_part_hours': self.min_part_hours,
'parts': self.parts,
'devs': self.devs,
'devs_changed': self.devs_changed,
'version': self.version,
'_replica2part2dev': self._replica2part2dev,
'_last_part_moves_epoch': self._last_part_moves_epoch,
'_last_part_moves': self._last_part_moves,
'_last_part_gather_start': self._last_part_gather_start,
'_remove_devs': self._remove_devs}
def change_min_part_hours(self, min_part_hours):
"""
Changes the value used to decide if a given partition can be moved
again. This restriction is to give the overall system enough time to
settle a partition to its new location before moving it to yet another
location. While no data would be lost if a partition is moved several
times quickly, it could make that data unreachable for a short period
of time.
This should be set to at least the average full partition replication
time. Starting it at 24 hours and then lowering it to what the
replicator reports as the longest partition cycle is best.
:param min_part_hours: new value for min_part_hours
"""
self.min_part_hours = min_part_hours
def set_replicas(self, new_replica_count):
"""
Changes the number of replicas in this ring.
If the new replica count is sufficiently different that
self._replica2part2dev will change size, sets
self.devs_changed. This is so tools like
bin/swift-ring-builder can know to write out the new ring
rather than bailing out due to lack of balance change.
"""
old_slots_used = int(self.parts * self.replicas)
new_slots_used = int(self.parts * new_replica_count)
if old_slots_used != new_slots_used:
self.devs_changed = True
self.replicas = new_replica_count
def get_ring(self):
"""
Get the ring, or more specifically, the swift.common.ring.RingData.
This ring data is the minimum required for use of the ring. The ring
builder itself keeps additional data such as when partitions were last
moved.
"""
# We cache the self._ring value so multiple requests for it don't build
# it multiple times. Be sure to set self._ring = None whenever the ring
# will need to be rebuilt.
if not self._ring:
            # Make devs list (with holes for deleted devices), not including
# builder-specific extra attributes.
devs = [None] * len(self.devs)
for dev in self._iter_devs():
devs[dev['id']] = dict((k, v) for k, v in dev.items()
if k not in ('parts', 'parts_wanted'))
# Copy over the replica+partition->device assignments, the device
# information, and the part_shift value (the number of bits to
# shift an unsigned int >I right to obtain the partition for the
# int).
if not self._replica2part2dev:
self._ring = RingData([], devs, 32 - self.part_power)
else:
self._ring = \
RingData([array('H', p2d) for p2d in
self._replica2part2dev],
devs, 32 - self.part_power)
return self._ring
def add_dev(self, dev):
"""
Add a device to the ring. This device dict should have a minimum of the
following keys:
====== ===============================================================
id unique integer identifier amongst devices. Defaults to the next
id if the 'id' key is not provided in the dict
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
region integer indicating which region the device is in
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
same (region, zone) pair if there is any alternative
ip the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev: device dict
:returns: id of device
"""
if 'id' not in dev:
dev['id'] = 0
if self.devs:
dev['id'] = max(d['id'] for d in self.devs if d) + 1
if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
raise exceptions.DuplicateDeviceError(
'Duplicate device id: %d' % dev['id'])
# Add holes to self.devs to ensure self.devs[dev['id']] will be the dev
while dev['id'] >= len(self.devs):
self.devs.append(None)
dev['weight'] = float(dev['weight'])
dev['parts'] = 0
self.devs[dev['id']] = dev
self._set_parts_wanted()
self.devs_changed = True
self.version += 1
return dev['id']
def set_dev_weight(self, dev_id, weight):
"""
Set the weight of a device. This should be called rather than just
altering the weight key in the device dict directly, as the builder
will need to rebuild some internal state to reflect the change.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
:param weight: new weight for device
"""
self.devs[dev_id]['weight'] = weight
self._set_parts_wanted()
self.devs_changed = True
self.version += 1
def remove_dev(self, dev_id):
"""
Remove a device from the ring.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
"""
dev = self.devs[dev_id]
dev['weight'] = 0
self._remove_devs.append(dev)
self._set_parts_wanted()
self.devs_changed = True
self.version += 1
def rebalance(self, seed=None):
"""
Rebalance the ring.
This is the main work function of the builder, as it will assign and
reassign partitions to devices in the ring based on weights, distinct
zones, recent reassignments, etc.
The process doesn't always perfectly assign partitions (that'd take a
lot more analysis and therefore a lot more time -- I had code that did
that before). Because of this, it keeps rebalancing until the device
skew (number of partitions a device wants compared to what it has) gets
        below 1% or doesn't change by more than 1% (only happens with a ring that
can't be balanced no matter what -- like with 3 zones of differing
weights with replicas set to 3).
:returns: (number_of_partitions_altered, resulting_balance)
"""
if seed:
random.seed(seed)
self._ring = None
if self._last_part_moves_epoch is None:
self._initial_balance()
self.devs_changed = False
return self.parts, self.get_balance()
retval = 0
self._update_last_part_moves()
last_balance = 0
new_parts, removed_part_count = self._adjust_replica2part2dev_size()
retval += removed_part_count
self._reassign_parts(new_parts)
retval += len(new_parts)
while True:
reassign_parts = self._gather_reassign_parts()
self._reassign_parts(reassign_parts)
retval += len(reassign_parts)
while self._remove_devs:
self.devs[self._remove_devs.pop()['id']] = None
balance = self.get_balance()
if balance < 1 or abs(last_balance - balance) < 1 or \
retval == self.parts:
break
last_balance = balance
self.devs_changed = False
self.version += 1
return retval, balance
def validate(self, stats=False):
"""
Validate the ring.
This is a safety function to try to catch any bugs in the building
process. It ensures partitions have been assigned to real devices,
aren't doubly assigned, etc. It can also optionally check the even
distribution of partitions across devices.
:param stats: if True, check distribution of partitions across devices
:returns: if stats is True, a tuple of (device_usage, worst_stat), else
(None, None). device_usage[dev_id] will equal the number of
partitions assigned to that device. worst_stat will equal the
number of partitions the worst device is skewed from the
number it should have.
:raises RingValidationError: problem was found with the ring.
"""
# "len" showed up in profiling, so it's just computed once.
dev_len = len(self.devs)
parts_on_devs = sum(d['parts'] for d in self._iter_devs())
if not self._replica2part2dev:
raise exceptions.RingValidationError(
'_replica2part2dev empty; did you forget to rebalance?')
parts_in_map = sum(len(p2d) for p2d in self._replica2part2dev)
if parts_on_devs != parts_in_map:
raise exceptions.RingValidationError(
'All partitions are not double accounted for: %d != %d' %
(parts_on_devs, parts_in_map))
if stats:
# dev_usage[dev_id] will equal the number of partitions assigned to
# that device.
dev_usage = array('I', (0 for _junk in xrange(dev_len)))
for part2dev in self._replica2part2dev:
for dev_id in part2dev:
dev_usage[dev_id] += 1
for part, replica in self._each_part_replica():
dev_id = self._replica2part2dev[replica][part]
if dev_id >= dev_len or not self.devs[dev_id]:
raise exceptions.RingValidationError(
"Partition %d, replica %d was not allocated "
"to a device." %
(part, replica))
for dev in self._iter_devs():
if not isinstance(dev['port'], int):
raise exceptions.RingValidationError(
"Device %d has port %r, which is not an integer." %
(dev['id'], dev['port']))
if stats:
weight_of_one_part = self.weight_of_one_part()
worst = 0
for dev in self._iter_devs():
if not dev['weight']:
if dev_usage[dev['id']]:
# If a device has no weight, but has partitions, then
# its overage is considered "infinity" and therefore
# always the worst possible. We show MAX_BALANCE for
# convenience.
worst = MAX_BALANCE
break
continue
skew = abs(100.0 * dev_usage[dev['id']] /
(dev['weight'] * weight_of_one_part) - 100.0)
if skew > worst:
worst = skew
return dev_usage, worst
return None, None
def get_balance(self):
"""
Get the balance of the ring. The balance value is the highest
percentage off the desired amount of partitions a given device
wants. For instance, if the "worst" device wants (based on its
weight relative to the sum of all the devices' weights) 123
partitions and it has 124 partitions, the balance value would
        be about 0.81 (1 extra / 123 wanted * 100 for percentage).
:returns: balance of the ring
"""
balance = 0
weight_of_one_part = self.weight_of_one_part()
for dev in self._iter_devs():
if not dev['weight']:
if dev['parts']:
# If a device has no weight, but has partitions, then its
# overage is considered "infinity" and therefore always the
# worst possible. We show MAX_BALANCE for convenience.
balance = MAX_BALANCE
break
continue
dev_balance = abs(100.0 * dev['parts'] /
(dev['weight'] * weight_of_one_part) - 100.0)
if dev_balance > balance:
balance = dev_balance
return balance
def pretend_min_part_hours_passed(self):
"""
Override min_part_hours by marking all partitions as having been moved
255 hours ago. This can be used to force a full rebalance on the next
call to rebalance.
"""
for part in xrange(self.parts):
self._last_part_moves[part] = 0xff
def get_part_devices(self, part):
"""
Get the devices that are responsible for the partition,
filtering out duplicates.
:param part: partition to get devices for
:returns: list of device dicts
"""
devices = []
for dev in self._devs_for_part(part):
if dev not in devices:
devices.append(dev)
return devices
def _iter_devs(self):
"""
        Returns an iterator over all the non-None devices in the ring. Note that
this means list(b._iter_devs())[some_id] may not equal b.devs[some_id];
you will have to check the 'id' key of each device to obtain its
dev_id.
"""
for dev in self.devs:
if dev is not None:
yield dev
def _set_parts_wanted(self):
"""
Sets the parts_wanted key for each of the devices to the number of
partitions the device wants based on its relative weight. This key is
used to sort the devices according to "most wanted" during rebalancing
to best distribute partitions. A negative parts_wanted indicates the
device is "overweight" and wishes to give partitions away if possible.
"""
weight_of_one_part = self.weight_of_one_part()
for dev in self._iter_devs():
if not dev['weight']:
# With no weight, that means we wish to "drain" the device. So
# we set the parts_wanted to a really large negative number to
# indicate its strong desire to give up everything it has.
dev['parts_wanted'] = -self.parts * self.replicas
else:
dev['parts_wanted'] = \
int(weight_of_one_part * dev['weight']) - dev['parts']
def _adjust_replica2part2dev_size(self):
"""
Make sure that the lengths of the arrays in _replica2part2dev
are correct for the current value of self.replicas.
Example:
self.part_power = 8
self.replicas = 2.25
self._replica2part2dev will contain 3 arrays: the first 2 of
length 256 (2**8), and the last of length 64 (0.25 * 2**8).
Returns a 2-tuple: the first element is a list of (partition,
replicas) tuples indicating which replicas need to be
(re)assigned to devices, and the second element is a count of
how many replicas were removed.
"""
removed_replicas = 0
fractional_replicas, whole_replicas = math.modf(self.replicas)
whole_replicas = int(whole_replicas)
desired_lengths = [self.parts] * whole_replicas
if fractional_replicas:
desired_lengths.append(int(self.parts * fractional_replicas))
to_assign = defaultdict(list)
if self._replica2part2dev is not None:
# If we crossed an integer threshold (say, 4.1 --> 4),
# we'll have a partial extra replica clinging on here. Clean
# up any such extra stuff.
for part2dev in self._replica2part2dev[len(desired_lengths):]:
for dev_id in part2dev:
dev_losing_part = self.devs[dev_id]
dev_losing_part['parts'] -= 1
removed_replicas += 1
self._replica2part2dev = \
self._replica2part2dev[:len(desired_lengths)]
else:
self._replica2part2dev = []
for replica, desired_length in enumerate(desired_lengths):
if replica < len(self._replica2part2dev):
part2dev = self._replica2part2dev[replica]
if len(part2dev) < desired_length:
# Not long enough: needs to be extended and the
# newly-added pieces assigned to devices.
for part in xrange(len(part2dev), desired_length):
to_assign[part].append(replica)
part2dev.append(0)
elif len(part2dev) > desired_length:
# Too long: truncate this mapping.
for part in xrange(desired_length, len(part2dev)):
dev_losing_part = self.devs[part2dev[part]]
dev_losing_part['parts'] -= 1
removed_replicas += 1
self._replica2part2dev[replica] = part2dev[:desired_length]
else:
# Mapping not present at all: make one up and assign
# all of it.
for part in xrange(desired_length):
to_assign[part].append(replica)
self._replica2part2dev.append(
array('H', (0 for _junk in xrange(desired_length))))
return (list(to_assign.iteritems()), removed_replicas)
def _initial_balance(self):
"""
Initial partition assignment is the same as rebalancing an
existing ring, but with some initial setup beforehand.
"""
self._last_part_moves = array('B', (0 for _junk in xrange(self.parts)))
self._last_part_moves_epoch = int(time())
self._reassign_parts(self._adjust_replica2part2dev_size()[0])
def _update_last_part_moves(self):
"""
Updates how many hours ago each partition was moved based on the
current time. The builder won't move a partition that has been moved
more recently than min_part_hours.
"""
elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
for part in xrange(self.parts):
# The "min(self._last_part_moves[part] + elapsed_hours, 0xff)"
# which was here showed up in profiling, so it got inlined.
last_plus_elapsed = self._last_part_moves[part] + elapsed_hours
if last_plus_elapsed < 0xff:
self._last_part_moves[part] = last_plus_elapsed
else:
self._last_part_moves[part] = 0xff
self._last_part_moves_epoch = int(time())
def _gather_reassign_parts(self):
"""
Returns a list of (partition, replicas) pairs to be reassigned by
gathering from removed devices, insufficiently-far-apart replicas, and
overweight drives.
"""
# inline memoization of tiers_for_dev() results (profiling reveals it
# as a hot-spot).
tfd = {}
# First we gather partitions from removed devices. Since removed
# devices usually indicate device failures, we have no choice but to
# reassign these partitions. However, we mark them as moved so later
# choices will skip other replicas of the same partition if possible.
removed_dev_parts = defaultdict(list)
if self._remove_devs:
dev_ids = [d['id'] for d in self._remove_devs if d['parts']]
if dev_ids:
for part, replica in self._each_part_replica():
dev_id = self._replica2part2dev[replica][part]
if dev_id in dev_ids:
self._last_part_moves[part] = 0
removed_dev_parts[part].append(replica)
# Now we gather partitions that are "at risk" because they aren't
# currently sufficient spread out across the cluster.
spread_out_parts = defaultdict(list)
max_allowed_replicas = self._build_max_replicas_by_tier()
for part in xrange(self.parts):
# Only move one replica at a time if possible.
if part in removed_dev_parts:
continue
# First, add up the count of replicas at each tier for each
# partition.
# replicas_at_tier was a "lambda: 0" defaultdict, but profiling
# revealed the lambda invocation as a significant cost.
replicas_at_tier = {}
for dev in self._devs_for_part(part):
if dev['id'] not in tfd:
tfd[dev['id']] = tiers_for_dev(dev)
for tier in tfd[dev['id']]:
if tier not in replicas_at_tier:
replicas_at_tier[tier] = 1
else:
replicas_at_tier[tier] += 1
# Now, look for partitions not yet spread out enough and not
# recently moved.
for replica in self._replicas_for_part(part):
dev = self.devs[self._replica2part2dev[replica][part]]
removed_replica = False
if dev['id'] not in tfd:
tfd[dev['id']] = tiers_for_dev(dev)
for tier in tfd[dev['id']]:
rep_at_tier = 0
if tier in replicas_at_tier:
rep_at_tier = replicas_at_tier[tier]
if (rep_at_tier > max_allowed_replicas[tier] and
self._last_part_moves[part] >=
self.min_part_hours):
self._last_part_moves[part] = 0
spread_out_parts[part].append(replica)
dev['parts_wanted'] += 1
dev['parts'] -= 1
removed_replica = True
break
if removed_replica:
if dev['id'] not in tfd:
tfd[dev['id']] = tiers_for_dev(dev)
for tier in tfd[dev['id']]:
replicas_at_tier[tier] -= 1
# Last, we gather partitions from devices that are "overweight" because
# they have more partitions than their parts_wanted.
reassign_parts = defaultdict(list)
# We randomly pick a new starting point in the "circular" ring of
# partitions to try to get a better rebalance when called multiple
# times.
start = self._last_part_gather_start / 4
start += random.randint(0, self.parts / 2) # GRAH PEP8!!!
self._last_part_gather_start = start
for replica, part2dev in enumerate(self._replica2part2dev):
# If we've got a partial replica, start may be out of
# range. Scale it down so that we get a similar movement
# pattern (but scaled down) on sequential runs.
this_start = int(float(start) * len(part2dev) / self.parts)
for part in itertools.chain(xrange(this_start, len(part2dev)),
xrange(0, this_start)):
if self._last_part_moves[part] < self.min_part_hours:
continue
if part in removed_dev_parts or part in spread_out_parts:
continue
dev = self.devs[part2dev[part]]
if dev['parts_wanted'] < 0:
self._last_part_moves[part] = 0
dev['parts_wanted'] += 1
dev['parts'] -= 1
reassign_parts[part].append(replica)
reassign_parts.update(spread_out_parts)
reassign_parts.update(removed_dev_parts)
reassign_parts_list = list(reassign_parts.iteritems())
# We shuffle the partitions to reassign so we get a more even
# distribution later. There has been discussion of trying to distribute
# partitions more "regularly" because that would actually reduce risk
# but 1) it is really difficult to do this with uneven clusters and 2)
# it would concentrate load during failure recovery scenarios
# (increasing risk). The "right" answer has yet to be debated to
# conclusion, but working code wins for now.
random.shuffle(reassign_parts_list)
return reassign_parts_list
def _reassign_parts(self, reassign_parts):
"""
For an existing ring data set, partitions are reassigned similarly to
the initial assignment. The devices are ordered by how many partitions
they still want and kept in that order throughout the process. The
gathered partitions are iterated through, assigning them to devices
according to the "most wanted" while keeping the replicas as "far
apart" as possible. Two different regions are considered the
farthest-apart things, followed by zones, then different ip/port pairs
within a zone; the least-far-apart things are different devices with
the same ip/port pair in the same zone.
If you want more replicas than devices, you won't get all your
replicas.
:param reassign_parts: An iterable of (part, replicas_to_replace)
pairs. replicas_to_replace is an iterable of the
replica (an int) to replace for that partition.
replicas_to_replace may be shared for multiple
partitions, so be sure you do not modify it.
"""
for dev in self._iter_devs():
dev['sort_key'] = self._sort_key_for(dev)
available_devs = \
sorted((d for d in self._iter_devs() if d['weight']),
key=lambda x: x['sort_key'])
tier2devs = defaultdict(list)
tier2sort_key = defaultdict(list)
max_tier_depth = 0
for dev in available_devs:
for tier in tiers_for_dev(dev):
tier2devs[tier].append(dev) # <-- starts out sorted!
tier2sort_key[tier].append(dev['sort_key'])
if len(tier) > max_tier_depth:
max_tier_depth = len(tier)
tier2children_sets = build_tier_tree(available_devs)
tier2children = defaultdict(list)
tier2children_sort_key = {}
tiers_list = [()]
depth = 1
while depth <= max_tier_depth:
new_tiers_list = []
for tier in tiers_list:
child_tiers = list(tier2children_sets[tier])
child_tiers.sort(key=lambda t: tier2sort_key[t][-1])
tier2children[tier] = child_tiers
tier2children_sort_key[tier] = map(
lambda t: tier2sort_key[t][-1], child_tiers)
new_tiers_list.extend(child_tiers)
tiers_list = new_tiers_list
depth += 1
for part, replace_replicas in reassign_parts:
# Gather up what other tiers (regions, zones, ip/ports, and
# devices) the replicas not-to-be-moved are in for this part.
other_replicas = defaultdict(int)
unique_tiers_by_tier_len = defaultdict(set)
for replica in self._replicas_for_part(part):
if replica not in replace_replicas:
dev = self.devs[self._replica2part2dev[replica][part]]
for tier in tiers_for_dev(dev):
other_replicas[tier] += 1
unique_tiers_by_tier_len[len(tier)].add(tier)
for replica in replace_replicas:
tier = ()
depth = 1
while depth <= max_tier_depth:
# Order the tiers by how many replicas of this
# partition they already have. Then, of the ones
# with the smallest number of replicas, pick the
# tier with the hungriest drive and then continue
# searching in that subtree.
#
# There are other strategies we could use here,
# such as hungriest-tier (i.e. biggest
# sum-of-parts-wanted) or picking one at random.
# However, hungriest-drive is what was used here
# before, and it worked pretty well in practice.
#
# Note that this allocator will balance things as
# evenly as possible at each level of the device
# layout. If your layout is extremely unbalanced,
# this may produce poor results.
#
# This used to be a cute, recursive function, but it's been
# unrolled for performance.
candidate_tiers = tier2children[tier]
candidates_with_replicas = \
unique_tiers_by_tier_len[len(tier) + 1]
if len(candidate_tiers) > len(candidates_with_replicas):
# There exists at least one tier with 0 other replicas,
# so work backward among the candidates, accepting the
# first which isn't in other_replicas.
#
# This optimization is to avoid calling the min()
# below, which is expensive if you've got thousands of
# drives.
for t in reversed(candidate_tiers):
if other_replicas[t] == 0:
tier = t
break
else:
min_count = min(other_replicas[t]
for t in candidate_tiers)
tier = (t for t in reversed(candidate_tiers)
if other_replicas[t] == min_count).next()
depth += 1
dev = tier2devs[tier][-1]
dev['parts_wanted'] -= 1
dev['parts'] += 1
old_sort_key = dev['sort_key']
new_sort_key = dev['sort_key'] = self._sort_key_for(dev)
for tier in tiers_for_dev(dev):
other_replicas[tier] += 1
unique_tiers_by_tier_len[len(tier)].add(tier)
index = bisect.bisect_left(tier2sort_key[tier],
old_sort_key)
tier2devs[tier].pop(index)
tier2sort_key[tier].pop(index)
new_index = bisect.bisect_left(tier2sort_key[tier],
new_sort_key)
tier2devs[tier].insert(new_index, dev)
tier2sort_key[tier].insert(new_index, new_sort_key)
# Now jiggle tier2children values to keep them sorted
new_last_sort_key = tier2sort_key[tier][-1]
parent_tier = tier[0:-1]
index = bisect.bisect_left(
tier2children_sort_key[parent_tier],
old_sort_key)
popped = tier2children[parent_tier].pop(index)
tier2children_sort_key[parent_tier].pop(index)
new_index = bisect.bisect_left(
tier2children_sort_key[parent_tier],
new_last_sort_key)
tier2children[parent_tier].insert(new_index, popped)
tier2children_sort_key[parent_tier].insert(
new_index, new_last_sort_key)
self._replica2part2dev[replica][part] = dev['id']
# Just to save memory and keep from accidental reuse.
for dev in self._iter_devs():
del dev['sort_key']
def _sort_key_for(self, dev):
# The maximum value of self.parts is 2^32, which is 9 hex
# digits wide (0x100000000). Using a width of 16 here gives us
# plenty of breathing room; you'd need more than 2^28 replicas
# to overflow it.
# Since the sort key is a string and therefore an ascii sort applies,
# the maximum_parts_wanted + parts_wanted is used so negative
# parts_wanted end up sorted above positive parts_wanted.
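        # Purely illustrative example: with parts * replicas == 768 and
        # parts_wanted == -5, the first field is 763 (0x2fb), so a device
        # with id 3 might get a key like '00000000000002fb.1a2b.0003'.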
return '%016x.%04x.%04x' % (
(self.parts * self.replicas) + dev['parts_wanted'],
random.randint(0, 0xFFFF),
dev['id'])
def _build_max_replicas_by_tier(self):
"""
Returns a dict of (tier: replica_count) for all tiers in the ring.
There will always be a () entry as the root of the structure, whose
replica_count will equal the ring's replica_count.
Then there will be (dev_id,) entries for each device, indicating the
maximum number of replicas the device might have for any given
partition. Anything greater than 1 indicates a partition at serious
risk, as the data on that partition will not be stored distinctly at
the ring's replica_count.
Next there will be (dev_id, ip_port) entries for each device,
indicating the maximum number of replicas the device shares with other
devices on the same ip_port for any given partition. Anything greater
than 1 indicates a partition at elevated risk, as if that ip_port were
to fail multiple replicas of that partition would be unreachable.
Last there will be (dev_id, ip_port, zone) entries for each device,
indicating the maximum number of replicas the device shares with other
devices within the same zone for any given partition. Anything greater
than 1 indicates a partition at slightly elevated risk, as if that zone
were to fail multiple replicas of that partition would be unreachable.
Example return dict for the common SAIO setup::
{(): 3,
(1,): 1.0,
(1, '127.0.0.1:6010'): 1.0,
(1, '127.0.0.1:6010', 0): 1.0,
(2,): 1.0,
(2, '127.0.0.1:6020'): 1.0,
(2, '127.0.0.1:6020', 1): 1.0,
(3,): 1.0,
(3, '127.0.0.1:6030'): 1.0,
(3, '127.0.0.1:6030', 2): 1.0,
(4,): 1.0,
(4, '127.0.0.1:6040'): 1.0,
(4, '127.0.0.1:6040', 3): 1.0}
"""
# Used by walk_tree to know what entries to create for each recursive
# call.
tier2children = build_tier_tree(self._iter_devs())
def walk_tree(tier, replica_count):
mr = {tier: replica_count}
if tier in tier2children:
subtiers = tier2children[tier]
for subtier in subtiers:
submax = math.ceil(float(replica_count) / len(subtiers))
mr.update(walk_tree(subtier, submax))
return mr
return walk_tree((), self.replicas)
def _devs_for_part(self, part):
"""
Returns a list of devices for a specified partition.
Deliberately includes duplicates.
"""
if self._replica2part2dev is None:
return []
return [self.devs[part2dev[part]]
for part2dev in self._replica2part2dev
if part < len(part2dev)]
def _replicas_for_part(self, part):
"""
Returns a list of replicas for a specified partition.
These can be used as indices into self._replica2part2dev
without worrying about IndexErrors.
"""
return [replica for replica, part2dev
in enumerate(self._replica2part2dev)
if part < len(part2dev)]
def _each_part_replica(self):
"""
Generator yielding every (partition, replica) pair in the ring.
"""
for replica, part2dev in enumerate(self._replica2part2dev):
for part in xrange(len(part2dev)):
yield (part, replica)
@classmethod
def load(cls, builder_file, open=open):
"""
Obtain RingBuilder instance of the provided builder file
:param builder_file: path to builder file to load
:return: RingBuilder instance
"""
builder = pickle.load(open(builder_file, 'rb'))
if not hasattr(builder, 'devs'):
builder_dict = builder
builder = RingBuilder(1, 1, 1)
builder.copy_from(builder_dict)
for dev in builder.devs:
#really old rings didn't have meta keys
if dev and 'meta' not in dev:
dev['meta'] = ''
            # NOTE(akscram): Old ring builder files don't contain
# replication parameters.
if dev:
if 'ip' in dev:
dev.setdefault('replication_ip', dev['ip'])
if 'port' in dev:
dev.setdefault('replication_port', dev['port'])
return builder
def save(self, builder_file):
"""Serialize this RingBuilder instance to disk.
:param builder_file: path to builder file to save
"""
with open(builder_file, 'wb') as f:
pickle.dump(self.to_dict(), f, protocol=2)
def search_devs(self, search_values):
"""Search devices by parameters.
:param search_values: a dictionary with search values to filter
devices, supported parameters are id,
region, zone, ip, port, replication_ip,
replication_port, device, weight, meta
:returns: list of device dicts
"""
matched_devs = []
for dev in self.devs:
if not dev:
continue
matched = True
for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
'replication_port', 'device', 'weight', 'meta'):
if key in search_values:
value = search_values.get(key)
if value is not None:
if key == 'meta':
if value not in dev.get(key):
matched = False
elif dev.get(key) != value:
matched = False
if matched:
matched_devs.append(dev)
return matched_devs
| apache-2.0 | 6,192,969,205,520,987,000 | 42.89675 | 79 | 0.560001 | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/PyChart-1.39/demos/rangetest.py | 6 | 1404 | #
# Copyright (C) 2000-2005 by Yasushi Saito ([email protected])
#
# Pychart is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Pychart is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from pychart import *
data = [ (0, 10, 30, 40, 60), (10, 20, 40, 50, 55), (20, 10, 35, 38, 43),
(30, 8, 30, 35, 39), (40, 8, 20, 28, 39) ]
ar = area.T(x_axis = axis.X(label="X axis"),
y_grid_interval = 10, y_grid_style = line_style.white,
y_axis = axis.Y(label="Y axis"),
y_grid_over_plot=1, legend = legend.T())
if theme.use_color:
colors = [ fill_style.darkseagreen, fill_style.white, fill_style.brown ]
else:
colors = [ fill_style.gray90, fill_style.white, fill_style.gray50 ]
ar.add_plot(range_plot.T(label="foo", data=data, fill_style = colors[0]))
ar.add_plot(range_plot.T(label="bar", data=data, min_col=2, max_col=3,
fill_style = colors[1]))
ar.add_plot(range_plot.T(label="baz", data=data, min_col=3, max_col=4,
fill_style = colors[2]))
ar.draw()
| mit | 2,242,719,558,959,210,200 | 41.545455 | 76 | 0.646011 | false |
bcl/anaconda | pyanaconda/iutil.py | 1 | 47063 | #
# iutil.py - generic install utility functions
#
# Copyright (C) 1999-2014
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Erik Troan <[email protected]>
#
import glob
import os
import stat
import os.path
import errno
import subprocess
import unicodedata
# Used for ascii_lowercase, ascii_uppercase constants
import string # pylint: disable=deprecated-module
import tempfile
import re
from urllib.parse import quote, unquote
import gettext
import signal
import sys
import requests
from requests_file import FileAdapter
from requests_ftp import FTPAdapter
from gi.repository import GLib
from pyanaconda.flags import flags
from pyanaconda.constants import DRACUT_SHUTDOWN_EJECT, TRANSLATIONS_UPDATE_DIR, UNSUPPORTED_HW
from pyanaconda.regexes import URL_PARSE
from pyanaconda.i18n import _
import logging
log = logging.getLogger("anaconda")
program_log = logging.getLogger("program")
from pyanaconda.anaconda_log import program_log_lock
_child_env = {}
def setenv(name, value):
""" Set an environment variable to be used by child processes.
This method does not modify os.environ for the running process, which
is not thread-safe. If setenv has already been called for a particular
variable name, the old value is overwritten.
:param str name: The name of the environment variable
:param str value: The value of the environment variable
"""
_child_env[name] = value
def augmentEnv():
env = os.environ.copy()
env.update({"ANA_INSTALL_PATH": getSysroot()})
env.update(_child_env)
return env
_root_path = "/mnt/sysimage"
def getTargetPhysicalRoot():
"""Returns the path to the "physical" storage root, traditionally /mnt/sysimage.
This may be distinct from the sysroot, which could be a
chroot-type subdirectory of the physical root. This is used for
example by all OSTree-based installations.
"""
# We always use the traditional /mnt/sysimage - the physical OS
# target is never mounted anywhere else. This API call just
# allows us to have a clean "git grep ROOT_PATH" in other parts of
# the code.
return _root_path
def setTargetPhysicalRoot(path):
"""Change the physical root path
:param string path: Path to use instead of /mnt/sysimage/
"""
global _root_path
_root_path = path
_sysroot = _root_path
def getSysroot():
"""Returns the path to the target OS installation.
For ordinary package-based installations, this is the same as the
target root.
"""
return _sysroot
def setSysroot(path):
"""Change the OS root path.
:param path: The new OS root path
This should only be used by Payload subclasses which install operating
systems to non-default roots.
"""
global _sysroot
_sysroot = path
def startProgram(argv, root='/', stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env_prune=None, env_add=None, reset_handlers=True, reset_lang=True, **kwargs):
""" Start an external program and return the Popen object.
The root and reset_handlers arguments are handled by passing a
preexec_fn argument to subprocess.Popen, but an additional preexec_fn
can still be specified and will be run. The user preexec_fn will be run
last.
    :param argv: The command to run and its arguments, as a list
:param root: The directory to chroot to before running command.
:param stdin: The file object to read stdin from.
:param stdout: The file object to write stdout to.
:param stderr: The file object to write stderr to.
:param env_prune: environment variables to remove before execution
:param env_add: environment variables to add before execution
:param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN
:param reset_lang: whether to set the locale of the child process to C
:param kwargs: Additional parameters to pass to subprocess.Popen
:return: A Popen object for the running command.
"""
if env_prune is None:
env_prune = []
# Transparently redirect callers requesting root=_root_path to the
# configured system root.
target_root = root
if target_root == _root_path:
target_root = getSysroot()
# Check for and save a preexec_fn argument
preexec_fn = kwargs.pop("preexec_fn", None)
# Map reset_handlers to the restore_signals Popen argument.
# restore_signals handles SIGPIPE, and preexec below handles any additional
# signals ignored by anaconda.
restore_signals = reset_handlers
def preexec():
        # If a target root was specified, chroot into it
if target_root and target_root != '/':
os.chroot(target_root)
os.chdir("/")
# Signal handlers set to SIG_IGN persist across exec. Reset
# these to SIG_DFL if requested. In particular this will include the
# SIGPIPE handler set by python.
if reset_handlers:
for signum in range(1, signal.NSIG):
if signal.getsignal(signum) == signal.SIG_IGN:
signal.signal(signum, signal.SIG_DFL)
# If the user specified an additional preexec_fn argument, run it
if preexec_fn is not None:
preexec_fn()
with program_log_lock:
program_log.info("Running... %s", " ".join(argv))
env = augmentEnv()
for var in env_prune:
env.pop(var, None)
if reset_lang:
env.update({"LC_ALL": "C"})
if env_add:
env.update(env_add)
return subprocess.Popen(argv,
stdin=stdin,
stdout=stdout,
stderr=stderr,
close_fds=True,
restore_signals=restore_signals,
preexec_fn=preexec, cwd=root, env=env, **kwargs)
def startX(argv, output_redirect=None):
""" Start X and return once X is ready to accept connections.
X11, if SIGUSR1 is set to SIG_IGN, will send SIGUSR1 to the parent
process once it is ready to accept client connections. This method
sets that up and waits for the signal or bombs out if nothing happens
for a minute. The process will also be added to the list of watched
processes.
:param argv: The command line to run, as a list
:param output_redirect: file or file descriptor to redirect stdout and stderr to
"""
# Use a list so the value can be modified from the handler function
x11_started = [False]
def sigusr1_handler(num, frame):
log.debug("X server has signalled a successful start.")
x11_started[0] = True
# Fail after, let's say a minute, in case something weird happens
# and we don't receive SIGUSR1
def sigalrm_handler(num, frame):
# Check that it didn't make it under the wire
if x11_started[0]:
return
log.error("Timeout trying to start %s", argv[0])
raise ExitError("Timeout trying to start %s" % argv[0])
# preexec_fn to add the SIGUSR1 handler in the child
def sigusr1_preexec():
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
try:
old_sigusr1_handler = signal.signal(signal.SIGUSR1, sigusr1_handler)
old_sigalrm_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
# Start the timer
signal.alarm(60)
childproc = startProgram(argv, stdout=output_redirect, stderr=output_redirect,
preexec_fn=sigusr1_preexec)
watchProcess(childproc, argv[0])
# Wait for SIGUSR1
while not x11_started[0]:
signal.pause()
finally:
# Put everything back where it was
signal.alarm(0)
signal.signal(signal.SIGUSR1, old_sigusr1_handler)
signal.signal(signal.SIGALRM, old_sigalrm_handler)
def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None, log_output=True,
binary_output=False, filter_stderr=False):
""" Run an external program, log the output and return it to the caller
    NOTE/WARNING: UnicodeDecodeError will be raised if the output of the
external command can't be decoded as UTF-8.
    :param argv: The command to run and its arguments, as a list
:param root: The directory to chroot to before running command.
:param stdin: The file object to read stdin from.
:param stdout: Optional file object to write the output to.
:param env_prune: environment variable to remove before execution
:param log_output: whether to log the output of command
:param binary_output: whether to treat the output of command as binary data
:param filter_stderr: whether to exclude the contents of stderr from the returned output
:return: The return code of the command and the output
"""
try:
if filter_stderr:
stderr = subprocess.PIPE
else:
stderr = subprocess.STDOUT
proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr,
env_prune=env_prune)
(output_string, err_string) = proc.communicate()
if output_string:
if binary_output:
output_lines = [output_string]
else:
output_string = output_string.decode("utf-8")
if output_string[-1] != "\n":
output_string = output_string + "\n"
output_lines = output_string.splitlines(True)
if log_output:
with program_log_lock:
if binary_output:
# try to decode as utf-8 and replace all undecodable data by
# "safe" printable representations when logging binary output
                        decoded_output_lines = [line.decode("utf-8", "replace")
                                                for line in output_lines]
                    else:
                        # output_lines is already a list of unicode strings
                        decoded_output_lines = output_lines
for line in decoded_output_lines:
program_log.info(line.strip())
if stdout:
stdout.write(output_string)
# If stderr was filtered, log it separately
if filter_stderr and err_string and log_output:
# try to decode as utf-8 and replace all undecodable data by
# "safe" printable representations when logging binary output
decoded_err_string = err_string.decode("utf-8", "replace")
err_lines = decoded_err_string.splitlines(True)
with program_log_lock:
for line in err_lines:
program_log.info(line.strip())
except OSError as e:
with program_log_lock:
program_log.error("Error running %s: %s", argv[0], e.strerror)
raise
with program_log_lock:
program_log.debug("Return code: %d", proc.returncode)
return (proc.returncode, output_string)
def execInSysroot(command, argv, stdin=None):
""" Run an external program in the target root.
:param command: The command to run
:param argv: The argument list
:param stdin: The file object to read stdin from.
:return: The return code of the command
"""
return execWithRedirect(command, argv, stdin=stdin, root=getSysroot())
def execWithRedirect(command, argv, stdin=None, stdout=None,
root='/', env_prune=None, log_output=True, binary_output=False):
""" Run an external program and redirect the output to a file.
:param command: The command to run
:param argv: The argument list
:param stdin: The file object to read stdin from.
:param stdout: Optional file object to redirect stdout and stderr to.
:param root: The directory to chroot to before running command.
:param env_prune: environment variable to remove before execution
:param log_output: whether to log the output of command
:param binary_output: whether to treat the output of command as binary data
:return: The return code of the command
"""
if flags.testing:
log.info("not running command because we're testing: %s %s",
command, " ".join(argv))
return 0
argv = [command] + argv
return _run_program(argv, stdin=stdin, stdout=stdout, root=root, env_prune=env_prune,
log_output=log_output, binary_output=binary_output)[0]
def execWithCapture(command, argv, stdin=None, root='/', log_output=True, filter_stderr=False):
""" Run an external program and capture standard out and err.
:param command: The command to run
:param argv: The argument list
:param stdin: The file object to read stdin from.
:param root: The directory to chroot to before running command.
:param log_output: Whether to log the output of command
:param filter_stderr: Whether stderr should be excluded from the returned output
:return: The output of the command
"""
if flags.testing:
log.info("not running command because we're testing: %s %s",
command, " ".join(argv))
return ""
argv = [command] + argv
return _run_program(argv, stdin=stdin, root=root, log_output=log_output,
filter_stderr=filter_stderr)[1]
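# Illustrative sketch (not part of the original module): typical use of the
# two helpers above.  execWithRedirect only returns the exit status, while
# execWithCapture returns the command's output as a string.  The commands
# and arguments below are only examples.
def _example_exec_helpers():
    rc = execWithRedirect("udevadm", ["settle", "--timeout=30"])
    if rc != 0:
        log.warning("udevadm settle exited with status %d", rc)
    kernel_release = execWithCapture("uname", ["-r"]).strip()
    return kernel_release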
def execWithCaptureBinary(command, argv, stdin=None, root='/', log_output=False, filter_stderr=False):
""" Run an external program and capture standard out and err as binary data.
The binary data output is not logged by default but logging can be enabled.
:param command: The command to run
:param argv: The argument list
:param stdin: The file object to read stdin from.
:param root: The directory to chroot to before running command.
:param log_output: Whether to log the binary output of the command
:param filter_stderr: Whether stderr should be excluded from the returned output
:return: The output of the command
"""
if flags.testing:
log.info("not running command because we're testing: %s %s",
command, " ".join(argv))
return ""
argv = [command] + argv
return _run_program(argv, stdin=stdin, root=root, log_output=log_output,
filter_stderr=filter_stderr, binary_output=True)[1]
def execReadlines(command, argv, stdin=None, root='/', env_prune=None, filter_stderr=False):
""" Execute an external command and return the line output of the command
in real-time.
This method assumes that there is a reasonably low delay between the
end of output and the process exiting. If the child process closes
stdout and then keeps on truckin' there will be problems.
NOTE/WARNING: UnicodeDecodeError will be raised if the output of the
external command can't be decoded as UTF-8.
:param command: The command to run
:param argv: The argument list
:param stdin: The file object to read stdin from.
:param root: The directory to chroot to before running command.
:param env_prune: environment variable to remove before execution
:param filter_stderr: Whether stderr should be excluded from the returned output
    Output from the command is not logged to program.log
This returns an iterator with the lines from the command until it has finished
"""
class ExecLineReader(object):
"""Iterator class for returning lines from a process and cleaning
up the process when the output is no longer needed.
"""
def __init__(self, proc, argv):
self._proc = proc
self._argv = argv
def __iter__(self):
return self
def __del__(self):
# See if the process is still running
if self._proc.poll() is None:
# Stop the process and ignore any problems that might arise
try:
self._proc.terminate()
except OSError:
pass
def __next__(self):
# Read the next line, blocking if a line is not yet available
line = self._proc.stdout.readline().decode("utf-8")
if line == '':
# Output finished, wait for the process to end
self._proc.communicate()
# Check for successful exit
if self._proc.returncode < 0:
raise OSError("process '%s' was killed by signal %s" %
(self._argv, -self._proc.returncode))
elif self._proc.returncode > 0:
raise OSError("process '%s' exited with status %s" %
(self._argv, self._proc.returncode))
raise StopIteration
return line.strip()
argv = [command] + argv
if filter_stderr:
stderr = subprocess.DEVNULL
else:
stderr = subprocess.STDOUT
try:
proc = startProgram(argv, root=root, stdin=stdin, stderr=stderr, env_prune=env_prune, bufsize=1)
except OSError as e:
with program_log_lock:
program_log.error("Error running %s: %s", argv[0], e.strerror)
raise
return ExecLineReader(proc, argv)
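# Illustrative sketch (not part of the original module): consuming
# execReadlines as an iterator.  Any program that writes UTF-8 lines to
# stdout works the same way; /proc/mounts is just an example input.
def _example_exec_readlines():
    mount_points = []
    for line in execReadlines("cat", ["/proc/mounts"]):
        # each iteration blocks until the child writes another line
        mount_points.append(line.split()[1])
    return mount_points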
## Run a shell.
def execConsole():
try:
proc = startProgram(["/bin/sh"], stdout=None, stderr=None, reset_lang=False)
proc.wait()
except OSError as e:
raise RuntimeError("Error running /bin/sh: " + e.strerror)
# Dictionary of processes to watch in the form {pid: [name, GLib event source id], ...}
_forever_pids = {}
# Set to True if process watching is handled by GLib
_watch_process_glib = False
_watch_process_handler_set = False
class ExitError(RuntimeError):
pass
# Raise an error on process exit. The argument is a list of tuples
# of the form [(name, status), ...] with statuses in the subprocess
# format (>=0 is return codes, <0 is signal)
def _raise_exit_error(statuses):
exn_message = []
for proc_name, status in statuses:
if status >= 0:
status_str = "with status %s" % status
else:
status_str = "on signal %s" % -status
exn_message.append("%s exited %s" % (proc_name, status_str))
raise ExitError(", ".join(exn_message))
# Signal handler used with watchProcess
def _sigchld_handler(num=None, frame=None):
# Check whether anything in the list of processes being watched has
# exited. We don't want to call waitpid(-1), since that would break
# anything else using wait/waitpid (like the subprocess module).
exited_pids = []
exit_statuses = []
for child_pid in _forever_pids:
try:
pid_result, status = eintr_retry_call(os.waitpid, child_pid, os.WNOHANG)
except ChildProcessError:
continue
if pid_result:
proc_name = _forever_pids[child_pid][0]
exited_pids.append(child_pid)
# Convert the wait-encoded status to the format used by subprocess
if os.WIFEXITED(status):
sub_status = os.WEXITSTATUS(status)
else:
# subprocess uses negative return codes to indicate signal exit
sub_status = -os.WTERMSIG(status)
exit_statuses.append((proc_name, sub_status))
for child_pid in exited_pids:
if _forever_pids[child_pid][1]:
GLib.source_remove(_forever_pids[child_pid][1])
del _forever_pids[child_pid]
if exit_statuses:
_raise_exit_error(exit_statuses)
# GLib callback used with watchProcess
def _watch_process_cb(pid, status, proc_name):
# Convert the wait-encoded status to the format used by subprocess
if os.WIFEXITED(status):
sub_status = os.WEXITSTATUS(status)
else:
# subprocess uses negative return codes to indicate signal exit
sub_status = -os.WTERMSIG(status)
_raise_exit_error([(proc_name, sub_status)])
def watchProcess(proc, name):
"""Watch for a process exit, and raise a ExitError when it does.
This method installs a SIGCHLD signal handler and thus interferes
the child_watch_add methods in GLib. Use watchProcessGLib to convert
to GLib mode if using a GLib main loop.
Since the SIGCHLD handler calls wait() on the watched process, this call
cannot be combined with Popen.wait() or Popen.communicate, and also
doing so wouldn't make a whole lot of sense.
:param proc: The Popen object for the process
:param name: The name of the process
"""
global _watch_process_handler_set
if not _watch_process_glib and not _watch_process_handler_set:
signal.signal(signal.SIGCHLD, _sigchld_handler)
_watch_process_handler_set = True
# Add the PID to the dictionary
# The second item in the list is for the GLib event source id and will be
# replaced with the id once we have one.
_forever_pids[proc.pid] = [name, None]
# If GLib is watching processes, add a watcher. child_watch_add checks if
# the process has already exited.
if _watch_process_glib:
        _forever_pids[proc.pid][1] = GLib.child_watch_add(proc.pid, _watch_process_cb, name)
else:
# Check that the process didn't already exit
if proc.poll() is not None:
del _forever_pids[proc.pid]
_raise_exit_error([(name, proc.returncode)])
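# Illustrative sketch (not part of the original module): launching a
# long-running helper with startProgram and letting the SIGCHLD machinery
# above turn an unexpected exit into an ExitError.  The helper path is a
# hypothetical example.
def _example_watch_helper():
    proc = startProgram(["/usr/libexec/example-helper"], reset_lang=False)
    watchProcess(proc, "example-helper")
    # ... later, once the helper is no longer needed, stop watching it
    # before terminating it so the exit does not raise ExitError.
    unwatchProcess(proc)
    proc.terminate()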
def watchProcessGLib():
"""Convert process watching to GLib mode.
This allows anaconda modes that use GLib main loops to use
GLib.child_watch_add and continue to watch processes started before the
main loop.
"""
global _watch_process_glib
# The first call to child_watch_add will replace our SIGCHLD handler, and
# child_watch_add checks if the process has already exited before it returns,
# which will handle processes that exit while we're in the loop.
_watch_process_glib = True
for child_pid in _forever_pids:
        _forever_pids[child_pid][1] = GLib.child_watch_add(
            child_pid, _watch_process_cb, _forever_pids[child_pid][0])
def unwatchProcess(proc):
"""Unwatch a process watched by watchProcess.
:param proc: The Popen object for the process.
"""
if _forever_pids[proc.pid][1]:
GLib.source_remove(_forever_pids[proc.pid][1])
del _forever_pids[proc.pid]
def unwatchAllProcesses():
"""Clear the watched process list."""
global _forever_pids
for child_pid in _forever_pids:
if _forever_pids[child_pid][1]:
GLib.source_remove(_forever_pids[child_pid][1])
_forever_pids = {}
def getDirSize(directory):
""" Get the size of a directory and all its subdirectories.
        :param directory: The name of the directory to find the size of.
:return: The size of the directory in kilobytes.
"""
def getSubdirSize(directory):
# returns size in bytes
try:
mydev = os.lstat(directory)[stat.ST_DEV]
except OSError as e:
log.debug("failed to stat %s: %s", directory, e)
return 0
try:
dirlist = os.listdir(directory)
except OSError as e:
log.debug("failed to listdir %s: %s", directory, e)
return 0
dsize = 0
for f in dirlist:
curpath = '%s/%s' % (directory, f)
try:
sinfo = os.lstat(curpath)
except OSError as e:
log.debug("failed to stat %s/%s: %s", directory, f, e)
continue
if stat.S_ISDIR(sinfo[stat.ST_MODE]):
if os.path.ismount(curpath):
continue
if mydev == sinfo[stat.ST_DEV]:
dsize += getSubdirSize(curpath)
elif stat.S_ISREG(sinfo[stat.ST_MODE]):
dsize += sinfo[stat.ST_SIZE]
return dsize
return getSubdirSize(directory) // 1024
## Create a directory path. Don't fail if the directory already exists.
def mkdirChain(directory):
"""
    :param directory: The directory path to create
"""
try:
os.makedirs(directory, 0o755)
except OSError as e:
try:
if e.errno == errno.EEXIST and stat.S_ISDIR(os.stat(directory).st_mode):
return
except OSError:
pass
log.error("could not create directory %s: %s", dir, e.strerror)
def get_active_console(dev="console"):
'''Find the active console device.
Some tty devices (/dev/console, /dev/tty0) aren't actual devices;
they just redirect input and output to the real console device(s).
These 'fake' ttys have an 'active' sysfs attribute, which lists the real
console device(s). (If there's more than one, the *last* one in the list
is the primary console.)
'''
# If there's an 'active' attribute, this is a fake console..
while os.path.exists("/sys/class/tty/%s/active" % dev):
# So read the name of the real, primary console out of the file.
dev = open("/sys/class/tty/%s/active" % dev).read().split()[-1]
return dev
def isConsoleOnVirtualTerminal(dev="console"):
console = get_active_console(dev) # e.g. 'tty1', 'ttyS0', 'hvc1'
consoletype = console.rstrip('0123456789') # remove the number
return consoletype == 'tty'
def reIPL(ipldev):
try:
rc = execWithRedirect("chreipl", ["node", "/dev/" + ipldev])
except RuntimeError as e:
rc = True
log.info("Unable to set reIPL device to %s: %s",
ipldev, e)
if rc:
log.info("reIPL configuration failed")
else:
log.info("reIPL configuration successful")
def resetRpmDb():
for rpmfile in glob.glob("%s/var/lib/rpm/__db.*" % getSysroot()):
try:
os.unlink(rpmfile)
except OSError as e:
log.debug("error %s removing file: %s", e, rpmfile)
def parseNfsUrl(nfsurl):
options = ''
host = ''
path = ''
if nfsurl:
s = nfsurl.split(":")
s.pop(0)
if len(s) >= 3:
(options, host, path) = s[:3]
elif len(s) == 2:
(host, path) = s
else:
host = s[0]
return (options, host, path)
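# Example (illustrative, not part of the original module): the accepted
# kickstart-style NFS strings and what parseNfsUrl returns for them.
#
#   parseNfsUrl("nfs:server.example.com:/path")
#       -> ('', 'server.example.com', '/path')
#   parseNfsUrl("nfs:nolock,timeo=50:server.example.com:/path")
#       -> ('nolock,timeo=50', 'server.example.com', '/path')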
def add_po_path(directory):
""" Looks to see what translations are under a given path and tells
the gettext module to use that path as the base dir """
for d in os.listdir(directory):
if not os.path.isdir("%s/%s" %(directory, d)):
continue
if not os.path.exists("%s/%s/LC_MESSAGES" %(directory, d)):
continue
for basename in os.listdir("%s/%s/LC_MESSAGES" %(directory, d)):
if not basename.endswith(".mo"):
continue
log.info("setting %s as translation source for %s", directory, basename[:-3])
gettext.bindtextdomain(basename[:-3], directory)
def setup_translations():
if os.path.isdir(TRANSLATIONS_UPDATE_DIR):
add_po_path(TRANSLATIONS_UPDATE_DIR)
gettext.textdomain("anaconda")
def _run_systemctl(command, service):
"""
Runs 'systemctl command service.service'
:return: exit status of the systemctl
"""
service_name = service + ".service"
ret = execWithRedirect("systemctl", [command, service_name])
return ret
def start_service(service):
return _run_systemctl("start", service)
def stop_service(service):
return _run_systemctl("stop", service)
def restart_service(service):
return _run_systemctl("restart", service)
def service_running(service):
ret = _run_systemctl("status", service)
return ret == 0
def dracut_eject(device):
"""
Use dracut shutdown hook to eject media after the system is shutdown.
This is needed because we are running from the squashfs.img on the media
so ejecting too early will crash the installer.
"""
if not device:
return
try:
if not os.path.exists(DRACUT_SHUTDOWN_EJECT):
mkdirChain(os.path.dirname(DRACUT_SHUTDOWN_EJECT))
f = open(DRACUT_SHUTDOWN_EJECT, "w")
f.write("#!/bin/sh\n")
f.write("# Created by Anaconda\n")
else:
f = open(DRACUT_SHUTDOWN_EJECT, "a")
f.write("eject %s\n" % (device,))
f.close()
eintr_retry_call(os.chmod, DRACUT_SHUTDOWN_EJECT, 0o755)
log.info("Wrote dracut shutdown eject hook for %s", device)
except (IOError, OSError) as e:
log.error("Error writing dracut shutdown eject hook for %s: %s", device, e)
def vtActivate(num):
"""
Try to switch to tty number $num.
:type num: int
:return: whether the switch was successful or not
:rtype: bool
"""
try:
ret = execWithRedirect("chvt", [str(num)])
except OSError as oserr:
ret = -1
log.error("Failed to run chvt: %s", oserr.strerror)
if ret != 0:
log.error("Failed to switch to tty%d", num)
return ret == 0
class ProxyStringError(Exception):
pass
class ProxyString(object):
""" Handle a proxy url
"""
def __init__(self, url=None, protocol="http://", host=None, port="3128",
username=None, password=None):
""" Initialize with either url
([protocol://][username[:password]@]host[:port]) or pass host and
optionally:
protocol http, https, ftp
host hostname without protocol
port port number (defaults to 3128)
username username
password password
The str() of the object is the full proxy url
ProxyString.url is the full url including username:password@
ProxyString.noauth_url is the url without username:password@
"""
self.url = ensure_str(url, keep_none=True)
self.protocol = ensure_str(protocol, keep_none=True)
self.host = ensure_str(host, keep_none=True)
self.port = str(port)
self.username = ensure_str(username, keep_none=True)
self.password = ensure_str(password, keep_none=True)
self.proxy_auth = ""
self.noauth_url = None
if url:
self.parse_url()
elif not host:
raise ProxyStringError(_("No host url"))
else:
self.parse_components()
def parse_url(self):
""" Parse the proxy url into its component pieces
"""
# NOTE: If this changes, update tests/regex/proxy.py
#
# proxy=[protocol://][username[:password]@]host[:port][path][?query][#fragment]
# groups (both named and numbered)
# 1 = protocol
# 2 = username
# 3 = password
# 4 = host
# 5 = port
# 6 = path
# 7 = query
# 8 = fragment
m = URL_PARSE.match(self.url)
if not m:
raise ProxyStringError(_("malformed URL, cannot parse it."))
# If no protocol was given default to http.
self.protocol = m.group("protocol") or "http://"
if m.group("username"):
self.username = ensure_str(unquote(m.group("username")))
if m.group("password"):
self.password = ensure_str(unquote(m.group("password")))
if m.group("host"):
self.host = m.group("host")
if m.group("port"):
self.port = m.group("port")
else:
raise ProxyStringError(_("URL has no host component"))
self.parse_components()
def parse_components(self):
""" Parse the components of a proxy url into url and noauth_url
"""
if self.username or self.password:
self.proxy_auth = "%s:%s@" % (quote(self.username or ""),
quote(self.password or ""))
self.url = self.protocol + self.proxy_auth + self.host + ":" + self.port
self.noauth_url = self.protocol + self.host + ":" + self.port
@property
def dict(self):
""" return a dict of all the elements of the proxy string
url, noauth_url, protocol, host, port, username, password
"""
components = ["url", "noauth_url", "protocol", "host", "port",
"username", "password"]
return dict((k, getattr(self, k)) for k in components)
def __str__(self):
return self.url
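# Illustrative sketch (not part of the original module): parsing a proxy
# string.  The hostname and credentials are made-up values.
def _example_proxy_string():
    proxy = ProxyString(url="http://user:[email protected]:8080")
    # proxy.noauth_url is "http://proxy.example.com:8080" and proxy.url
    # keeps the quoted "user:secret@" part; individual fields such as
    # proxy.host and proxy.port are also available via proxy.dict.
    return proxy.dict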
def getdeepattr(obj, name):
"""This behaves as the standard getattr, but supports
composite (containing dots) attribute names.
As an example:
>>> import os
>>> from os.path import split
>>> getdeepattr(os, "path.split") == split
True
"""
for attr in name.split("."):
obj = getattr(obj, attr)
return obj
def setdeepattr(obj, name, value):
"""This behaves as the standard setattr, but supports
composite (containing dots) attribute names.
As an example:
>>> class O:
>>> pass
>>> a = O()
>>> a.b = O()
>>> a.b.c = O()
>>> setdeepattr(a, "b.c.d", True)
>>> a.b.c.d
True
"""
path = name.split(".")
for attr in path[:-1]:
obj = getattr(obj, attr)
return setattr(obj, path[-1], value)
def strip_accents(s):
"""This function takes arbitrary unicode string
and returns it with all the diacritics removed.
:param s: arbitrary string
:type s: str
:return: s with diacritics removed
:rtype: str
"""
return ''.join((c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'))
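# Example (illustrative, not part of the original module):
#   strip_accents("Fédorová") -> "Fedorova"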
def cmp_obj_attrs(obj1, obj2, attr_list):
""" Compare attributes of 2 objects for changes
Missing attrs are considered a mismatch
:param obj1: First object to compare
:type obj1: Any object
:param obj2: Second object to compare
:type obj2: Any object
:param attr_list: List of attributes to compare
:type attr_list: list or tuple of strings
:returns: True if the attrs all match
:rtype: bool
"""
for attr in attr_list:
if hasattr(obj1, attr) and hasattr(obj2, attr):
if getattr(obj1, attr) != getattr(obj2, attr):
return False
else:
return False
return True
def dir_tree_map(root, func, files=True, dirs=True):
"""
Apply the given function to all files and directories in the directory tree
under the given root directory.
:param root: root of the directory tree the function should be mapped to
:type root: str
:param func: a function taking the directory/file path
:type func: path -> None
:param files: whether to apply the function to the files in the dir. tree
:type files: bool
:param dirs: whether to apply the function to the directories in the dir. tree
:type dirs: bool
TODO: allow using globs and thus more trees?
"""
for (dir_ent, _dir_items, file_items) in os.walk(root):
if dirs:
# try to call the function on the directory entry
try:
func(dir_ent)
except OSError:
pass
if files:
# try to call the function on the files in the directory entry
for file_ent in (os.path.join(dir_ent, f) for f in file_items):
try:
func(file_ent)
except OSError:
pass
# directories under the directory entry will appear as directory entries
# in the loop
def chown_dir_tree(root, uid, gid, from_uid_only=None, from_gid_only=None):
"""
Change owner (uid and gid) of the files and directories under the given
directory tree (recursively).
:param root: root of the directory tree that should be chown'ed
:type root: str
:param uid: UID that should be set as the owner
:type uid: int
:param gid: GID that should be set as the owner
:type gid: int
:param from_uid_only: if given, the owner is changed only for the files and
directories owned by that UID
:type from_uid_only: int or None
:param from_gid_only: if given, the owner is changed only for the files and
directories owned by that GID
:type from_gid_only: int or None
"""
def conditional_chown(path, uid, gid, from_uid=None, from_gid=None):
stats = os.stat(path)
if (from_uid and stats.st_uid != from_uid) or \
(from_gid and stats.st_gid != from_gid):
# owner UID or GID not matching, do nothing
return
# UID and GID matching or not required
eintr_retry_call(os.chown, path, uid, gid)
if not from_uid_only and not from_gid_only:
# the easy way
dir_tree_map(root, lambda path: eintr_retry_call(os.chown, path, uid, gid))
else:
# conditional chown
dir_tree_map(root, lambda path: conditional_chown(path, uid, gid,
from_uid_only,
from_gid_only))
def is_unsupported_hw():
""" Check to see if the hardware is supported or not.
:returns: True if this is unsupported hardware, False otherwise
:rtype: bool
"""
try:
tainted = int(open("/proc/sys/kernel/tainted").read())
except (IOError, ValueError):
tainted = 0
status = bool(tainted & UNSUPPORTED_HW)
if status:
log.debug("Installing on Unsupported Hardware")
return status
def ensure_str(str_or_bytes, keep_none=True):
"""
    Returns a str instance for the given string, or None if None was passed
    and keep_none is True.
:param str_or_bytes: string to be kept or converted to str type
:type str_or_bytes: str or bytes
:param bool keep_none: whether to keep None as it is or raise ValueError if
None is passed
:raises ValueError: if applied on an object not being of type bytes nor str
(nor NoneType if :param:`keep_none` is False)
"""
if keep_none and str_or_bytes is None:
return None
elif isinstance(str_or_bytes, str):
return str_or_bytes
elif isinstance(str_or_bytes, bytes):
return str_or_bytes.decode(sys.getdefaultencoding())
else:
raise ValueError("str_or_bytes must be of type 'str' or 'bytes', not '%s'" % type(str_or_bytes))
# Define translations between ASCII uppercase and lowercase for
# locale-independent string conversions. The tables are translation mappings
# built with str.maketrans for use with str.translate, so the conversions
# do not depend on the current locale.
_ASCIIlower_table = str.maketrans(string.ascii_uppercase, string.ascii_lowercase)
_ASCIIupper_table = str.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def _toASCII(s):
"""Convert a unicode string to ASCII"""
if isinstance(s, str):
# Decompose the string using the NFK decomposition, which in addition
# to the canonical decomposition replaces characters based on
# compatibility equivalence (e.g., ROMAN NUMERAL ONE has its own code
# point but it's really just a capital I), so that we can keep as much
# of the ASCII part of the string as possible.
s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode("ascii")
elif not isinstance(s, bytes):
s = ''
return s
def upperASCII(s):
"""Convert a string to uppercase using only ASCII character definitions.
The returned string will contain only ASCII characters. This function is
locale-independent.
"""
# XXX: Python 3 has str.maketrans() and bytes.maketrans() so we should
# ideally use one or the other depending on the type of 's'. But it turns
# out we expect this function to always return string even if given bytes.
s = ensure_str(s)
return str.translate(_toASCII(s), _ASCIIupper_table)
def lowerASCII(s):
"""Convert a string to lowercase using only ASCII character definitions.
The returned string will contain only ASCII characters. This function is
locale-independent.
"""
# XXX: Python 3 has str.maketrans() and bytes.maketrans() so we should
# ideally use one or the other depending on the type of 's'. But it turns
# out we expect this function to always return string even if given bytes.
s = ensure_str(s)
return str.translate(_toASCII(s), _ASCIIlower_table)
def upcase_first_letter(text):
"""
Helper function that upcases the first letter of the string. Python's
standard string.capitalize() not only upcases the first letter but also
lowercases all the others. string.title() capitalizes all words in the
string.
:type text: str
:return: the given text with the first letter upcased
:rtype: str
"""
if not text:
# cannot change anything
return text
elif len(text) == 1:
return text.upper()
else:
return text[0].upper() + text[1:]
def get_mount_paths(devnode):
'''given a device node, return a list of all active mountpoints.'''
devno = os.stat(devnode).st_rdev
majmin = "%d:%d" % (os.major(devno), os.minor(devno))
mountinfo = (line.split() for line in open("/proc/self/mountinfo"))
return [info[4] for info in mountinfo if info[2] == majmin]
def have_word_match(str1, str2):
"""Tells if all words from str1 exist in str2 or not."""
if str1 is None or str2 is None:
# None never matches
return False
if str1 == "":
# empty string matches everything except from None
return True
elif str2 == "":
# non-empty string cannot be found in an empty string
return False
# Convert both arguments to string if not already
str1 = ensure_str(str1)
str2 = ensure_str(str2)
str1 = str1.lower()
str1_words = str1.split()
str2 = str2.lower()
return all(word in str2 for word in str1_words)
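# Example (illustrative, not part of the original module): every word of the
# first argument has to occur somewhere in the second one.
#
#   have_word_match("fedora server", "Fedora Server Edition")  -> True
#   have_word_match("fedora kde", "Fedora Server Edition")     -> False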
class DataHolder(dict):
""" A dict that lets you also access keys using dot notation. """
def __init__(self, **kwargs):
""" kwargs are set as keys for the dict. """
dict.__init__(self)
for attr, value in kwargs.items():
self[attr] = value
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
def copy(self):
return DataHolder(**dict.copy(self))
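# Illustrative sketch (not part of the original module): DataHolder gives
# dict entries attribute-style access, which is handy for ad-hoc option
# objects.
def _example_data_holder():
    opts = DataHolder(method="url", url="http://example.com/repo", noverifyssl=False)
    return opts.method, opts["url"], opts.copy()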
def xprogressive_delay():
""" A delay generator, the delay starts short and gets longer
as the internal counter increases.
    For example, over 10 retries the delay increases from
    0.5 to 256 seconds.
    :returns: a generator yielding the time to wait in seconds (float)
"""
counter = 1
while True:
yield 0.25*(2**counter)
counter += 1
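# Illustrative sketch (not part of the original module): a retry loop driven
# by the delay generator above.  The fetch callable and its URL are
# hypothetical stand-ins.
def _example_retry_with_backoff(fetch, url, max_retries=5):
    import time
    delays = xprogressive_delay()
    for _attempt in range(max_retries):
        try:
            return fetch(url)
        except IOError:
            # wait 0.5 s, 1 s, 2 s, ... before the next attempt
            time.sleep(next(delays))
    raise IOError("could not fetch %s after %d attempts" % (url, max_retries))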
def get_platform_groupid():
""" Return a platform group id string
This runs systemd-detect-virt and if the result is not 'none' it
prefixes the lower case result with "platform-" for use as a group id.
:returns: Empty string or a group id for the detected platform
:rtype: str
"""
try:
platform = execWithCapture("systemd-detect-virt", []).strip()
except (IOError, AttributeError):
return ""
if platform == "none":
return ""
return "platform-" + platform.lower()
def persistent_root_image():
""":returns: whether we are running from a persistent (not in RAM) root.img"""
for line in execReadlines("losetup", ["--list"]):
# if there is an active loop device for a curl-fetched file that has
# been deleted, it means we run from a non-persistent root image
# EXAMPLE line:
# /dev/loop0 0 0 0 1 /tmp/curl_fetch_url0/my_comps_squashfs.img (deleted)
if re.match(r'.*curl_fetch_url.*\(deleted\)\s*$', line):
return False
return True
_supports_ipmi = None
def ipmi_report(event):
global _supports_ipmi
if _supports_ipmi is None:
_supports_ipmi = os.path.exists("/dev/ipmi0") and os.path.exists("/usr/bin/ipmitool")
if not _supports_ipmi:
return
(fd, path) = tempfile.mkstemp()
# EVM revision - always 0x4
# Sensor type - always 0x1F for Base OS Boot/Installation Status
# Sensor num - always 0x0 for us
# Event dir & type - always 0x6f for us
# Event data 1 - the event code passed in
# Event data 2 & 3 - always 0x0 for us
event_string = "0x4 0x1F 0x0 0x6f %#x 0x0 0x0\n" % event
eintr_retry_call(os.write, fd, event_string.encode("utf-8"))
eintr_ignore(os.close, fd)
execWithCapture("ipmitool", ["sel", "add", path])
os.remove(path)
# Copied from python's subprocess.py
def eintr_retry_call(func, *args, **kwargs):
"""Retry an interruptible system call if interrupted."""
while True:
try:
return func(*args, **kwargs)
except InterruptedError:
continue
def eintr_ignore(func, *args, **kwargs):
"""Call a function and ignore EINTR.
This is useful for calls to close() and dup2(), which can return EINTR
but which should *not* be retried, since by the time they return the
file descriptor is already closed.
"""
try:
return func(*args, **kwargs)
except InterruptedError:
pass
def parent_dir(directory):
"""Return the parent's path"""
return "/".join(os.path.normpath(directory).split("/")[:-1])
def requests_session():
"""Return a requests.Session object with file and ftp support."""
session = requests.Session()
session.mount("file://", FileAdapter())
session.mount("ftp://", FTPAdapter())
return session
_open = open
def open(*args, **kwargs): # pylint: disable=redefined-builtin
"""Open a file, and retry on EINTR.
The arguments are the same as those to python's builtin open.
This is equivalent to eintr_retry_call(open, ...). Some other
high-level languages handle this for you, like C's fopen.
"""
return eintr_retry_call(_open, *args, **kwargs)
| gpl-2.0 | 4,872,098,951,905,588,000 | 34.0693 | 104 | 0.62308 | false |
rackerlabs/osrc | osrc/index.py | 4 | 4841 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["rebuild_index", "get_neighbors"]
import os
import h5py
import flask
import shutil
import pyflann
import numpy as np
from .database import get_pipeline, format_key
_basepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "static")
evttypes = [l.strip() for l in open(os.path.join(_basepath, "evttypes.txt"))]
langs = [l.strip() for l in open(os.path.join(_basepath, "languages.txt"))]
index_filename = "index.h5"
points_filename = "points.h5"
nevts = len(evttypes)
nlangs = len(langs)
nvector = 1 + 7 + nevts + 1 + 1 + 1 + 1 + nlangs + 1
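# Layout of the behavior vector (an illustrative summary added for clarity;
# see parse_vector below for the authoritative construction):
#   index 0              : 1 / (total number of events + 1)
#   indices 1..7         : fraction of events on each day of the week
#   next nevts entries   : fraction of events of each event type
#   next 4 entries       : reciprocal counts 1/(n+1) of contributions,
#                          connections, repos and languages
#   next nlangs entries  : fraction of events in each known language
#   last entry           : fraction of events in unrecognized languages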
def get_vector(user, pipe=None):
"""
Given a username, fetch all of the data needed to build a behavior vector
from the database.
:param user: The GitHub username.
:param pipe: (optional) if provided, simply add the requests to the
existing redis pipeline and don't execute the request.
"""
no_pipe = False
if pipe is None:
pipe = get_pipeline()
no_pipe = True
user = user.lower()
pipe.zscore(format_key("user"), user)
pipe.hgetall(format_key("user:{0}:day".format(user)))
pipe.zrevrange(format_key("user:{0}:event".format(user)), 0, -1,
withscores=True)
pipe.zcard(format_key("user:{0}:contribution".format(user)))
pipe.zcard(format_key("user:{0}:connection".format(user)))
pipe.zcard(format_key("user:{0}:repo".format(user)))
pipe.zcard(format_key("user:{0}:lang".format(user)))
pipe.zrevrange(format_key("user:{0}:lang".format(user)), 0, -1,
withscores=True)
if no_pipe:
return pipe.execute()
def parse_vector(results):
"""
Parse the results of a call to ``get_vector`` into a numpy array.
:param results: The list of results from the redis request.
"""
points = np.zeros(nvector)
total = int(results[0])
points[0] = 1.0 / (total + 1)
# Week means.
for k, v in results[1].iteritems():
points[1 + int(k)] = float(v) / total
# Event types.
n = 8
for k, v in results[2]:
points[n + evttypes.index(k)] = float(v) / total
# Number of contributions, connections and languages.
n += nevts
points[n] = 1.0 / (float(results[3]) + 1)
points[n + 1] = 1.0 / (float(results[4]) + 1)
points[n + 2] = 1.0 / (float(results[5]) + 1)
points[n + 3] = 1.0 / (float(results[6]) + 1)
# Top languages.
n += 4
for k, v in results[7]:
if k in langs:
points[n + langs.index(k)] = float(v) / total
else:
# Unknown language.
points[-1] = float(v) / total
return points
def _h5_filename(fn):
return os.path.join(flask.current_app.config.get("INDEX_DIR", ""), fn)
def get_neighbors(name, num=5):
"""
Find the K nearest neighbors to a user in "behavior space".
:param name: The GitHub username.
    :param num: (optional; default: 5) The number of neighbors to find.
"""
# Get the vector for this user.
vector = get_vector(name)
# If any of the components are None, bail.
if any([v is None for v in vector]):
return []
# Parse the vector.
vector = parse_vector(vector)
# Load the points and user names.
with h5py.File(_h5_filename(points_filename), "r") as f:
points = f["points"][...]
usernames = f["names"][...]
# Load the index.
flann = pyflann.FLANN()
flann.load_index(_h5_filename(index_filename), points)
# Find the neighbors.
inds, dists = flann.nn_index(vector, num_neighbors=num+1)
inds = inds[0]
if usernames[inds[0]] == name:
inds = inds[1:]
else:
inds = inds[:-1]
return list(usernames[inds])
def rebuild_index():
"""
Rebuild the K-nearest neighbors index based on 50000 of the most active
users (ignoring the top 500 most active).
"""
pipe = get_pipeline()
usernames = pipe.zrevrange(format_key("user"), 500, 50500).execute()[0]
for user in usernames:
get_vector(user, pipe=pipe)
results = pipe.execute()
points = np.zeros([len(usernames), nvector])
for i in range(len(usernames)):
points[i, :] = parse_vector(results[8 * i:8 * (i + 1)])
flann = pyflann.FLANN()
flann.build_index(points)
# Save the index.
fn1 = _h5_filename(index_filename)
tmp1 = fn1 + ".tmp"
flann.save_index(tmp1)
# Save the index coordinates.
fn2 = _h5_filename(points_filename)
tmp2 = fn2 + ".tmp"
with h5py.File(tmp2, "w") as f:
f["points"] = points
f["names"] = usernames
# Atomically move the index files into place.
shutil.move(tmp1, fn1)
shutil.move(tmp2, fn2)
| mit | -1,321,124,036,556,423,700 | 26.505682 | 78 | 0.602355 | false |
gnieboer/tensorflow | tensorflow/python/kernel_tests/tensor_array_ops_test.py | 10 | 48213 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _make_converter(tf_dtype):
def _converter(x):
if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
return np.asarray(x).astype("|S")
x = np.asarray(x).astype(tf_dtype.as_numpy_dtype)
if tf_dtype.is_complex:
# Add a non-zero imaginary component to x.
x -= 1j * x
return x
return _converter
class TensorArrayTest(test.TestCase):
def testTensorArrayWriteRead(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def _testTensorArrayWritePackMaybeLegacy(self):
self._testTensorArrayWritePack(dtypes.float32)
self._testTensorArrayWritePack(dtypes.float64)
self._testTensorArrayWritePack(dtypes.int32)
self._testTensorArrayWritePack(dtypes.int64)
self._testTensorArrayWritePack(dtypes.complex64)
self._testTensorArrayWritePack(dtypes.complex128)
self._testTensorArrayWritePack(dtypes.string)
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
def testEmptyTensorArrayPack(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
self.assertAllEqual([3, 0, 1], c0.eval().shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(dtypes.float32)
self._testTensorArrayWriteConcat(dtypes.float64)
self._testTensorArrayWriteConcat(dtypes.int32)
self._testTensorArrayWriteConcat(dtypes.int64)
self._testTensorArrayWriteConcat(dtypes.complex64)
self._testTensorArrayWriteConcat(dtypes.complex128)
self._testTensorArrayWriteConcat(dtypes.string)
def _testTensorArrayPackNotAllValuesAvailableFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError("Could not read from TensorArray index 1 "
"because it has not yet been written to."):
ta.write(0, [[4.0, 5.0]]).stack().eval()
def testTensorArrayPackNotAllValuesAvailableFails(self):
self._testTensorArrayPackNotAllValuesAvailableFails()
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Reset ta because we're going to change the shape, else shape
# inference will throw an error.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
self._testTensorArrayUnpackRead(dtypes.float32)
self._testTensorArrayUnpackRead(dtypes.float64)
self._testTensorArrayUnpackRead(dtypes.int32)
self._testTensorArrayUnpackRead(dtypes.int64)
self._testTensorArrayUnpackRead(dtypes.complex64)
self._testTensorArrayUnpackRead(dtypes.complex128)
self._testTensorArrayUnpackRead(dtypes.string)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
convert = _make_converter(tf_dtype)
# Split an empty vector
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(dtypes.float32)
self._testTensorArraySplitRead(dtypes.float64)
self._testTensorArraySplitRead(dtypes.int32)
self._testTensorArraySplitRead(dtypes.int64)
self._testTensorArraySplitRead(dtypes.complex64)
self._testTensorArraySplitRead(dtypes.complex128)
self._testTensorArraySplitRead(dtypes.string)
def testTensorGradArrayWriteRead(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype
with self.assertRaisesOpError(
"TensorArray dtype is float but Op is trying to write dtype string"):
ta.write(-1, "wrong_type_scalar").flow.eval()
# Test writing to a negative index
with self.assertRaisesOpError(
"Tried to write to index -1 but array is not "
"resizeable and size is: 3"):
ta.write(-1, 3.0).flow.eval()
# Test reading from too large an index
with self.assertRaisesOpError(
"Tried to write to index 3 but array is not "
"resizeable and size is: 3"):
ta.write(3, 3.0).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype
r0_bad = gen_data_flow_ops._tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
r0_bad.eval()
# Test reading from a different index than the one we wrote to
r1 = w0.read(1)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 because "
"it has not yet been written to."):
r1.eval()
# Test reading from a negative index
with self.assertRaisesOpError(
r"Tried to read from index -1 but array size is: 3"):
ta.read(-1).eval()
# Test reading from too large an index
with self.assertRaisesOpError(
"Tried to read from index 3 but array size is: 3"):
ta.read(3).eval()
def testTensorArrayWriteMultipleFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
ta.write(2, 3.0).write(2, 3.0).flow.eval()
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
w3.concat().eval()
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
with self.assertRaisesOpError(
r"TensorArray has inconsistent shapes. Index 0 has "
r"\(excepting dimension 0\) shape: \[\] but index 2 has \(excepting "
r"dimension 0\) shape: \[1\]"):
w3.concat().eval()
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]"):
ta.split([1.0, 2.0, 3.0], [1]).flow.eval()
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"):
ta.split(1.0, [1]).flow.eval()
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
infer_shape=False)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(use_gpu=True):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]
]) # concat gradient
      grad_vals = sess.run(grad_r)  # one gradient per x (value_0, value_1)
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
with self.test_session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unstack(value)
r0_readonce = w_readonce.read(0)
with ops.control_dependencies([r0_readonce]):
r1_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
r1_readonce.eval()
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def _testTensorArrayGradientUnpackRead(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
def testTensorArrayGradientSplitConcat(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r1 = w.read(1)
      # Test combined gradients of read(0) and read(1)
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientDynamicUnpackRead(self):
self._testTensorArrayGradientDynamicUnpackRead()
def testCloseTensorArray(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.test_session(use_gpu=True) as session:
v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
ta = tensor_array_ops.TensorArray(
dtype=dtype,
tensor_array_name="foo",
size=0 if dynamic_size else 3,
dynamic_size=dynamic_size)
time_0 = array_ops.identity(0)
def body(time, ta_t, state):
sliced = array_ops.slice(
v0, begin=array_ops.stack([time, 0]), size=[1, -1])
sliced = array_ops.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time + 1, ta_t, state)
(unused_0, h_final, unused_2) = control_flow_ops.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()),
parallel_iterations=3)
vout = h_final.stack()
grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
variables.global_variables_initializer().run()
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
just_v0_grad_t, = session.run([v0_grad])
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
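      # Hence d(vout[j])/d(v0[i]) = 1 whenever i <= j, so the gradient w.r.t.
      # v0[i] is the sum of the incoming gradients for rows i..2 (a reverse
      # cumulative sum), which expected_v0_grad_t below spells out explicitly.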
state_per_time = np.array(
[state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :], grad_val[2, :]
])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=dtypes.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
def testWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
with self.test_session(use_gpu=True):
num_steps = 100
acc = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=num_steps,
clear_after_read=False,
element_shape=tensor_shape.scalar())
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, acc: i < 5
def b(i, acc):
x1 = control_flow_ops.cond(
math_ops.equal(i, 0), lambda: x,
lambda: math_ops.multiply(acc.read(i - 1), 2.0))
return i + 1, acc.write(i, x1)
i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
z = constant_op.constant(0.0)
def fn(i, acc):
return i + 1, acc.write(i, z)
_, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
[i1, acc1])
r = acc2.stack()
grad = gradients_impl.gradients(r, [x])[0]
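      # The first loop writes acc[0:5] = [2, 4, 8, 16, 32] (x doubled each
      # step) and the second loop only writes the constant z, so
      # d(sum(stack))/dx = 1 + 2 + 4 + 8 + 16 = 31.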
self.assertAllClose(31.0, grad.eval())
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session(use_gpu=True) as session:
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
def testGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
def testGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo"))
self.assertEqual("gradients",
self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo/bar"))
def testGetGradSource_EnclosingScope(self):
self.assertEqual("foo/gradients:0",
self._grad_source_for_name("foo/gradients"))
self.assertEqual("foo/gradients_0:0",
self._grad_source_for_name("foo/gradients_0"))
self.assertEqual("foo/gradients",
self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual("foo/gradients_0",
self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual("foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual("foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
def testGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
def testWriteShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def testPartlyUnknownShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
      # Writing a less specific shape doesn't change the inferred element shape.
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
def testUnpackShape(self):
self._testUnpackShape()
def testSplitShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
dynamic_size=True,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
      # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayUnpackDynamic(self):
with self.test_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.unstack(x)
w1 = w0.write(3, 4.0)
r = w1.stack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), sess.run(grad)[0])
def testTensorArrayUnpackDynamic(self):
self._testTensorArrayUnpackDynamic()
def testTensorArraySplitDynamic(self):
with self.test_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), sess.run(grad)[0])
def _testTensorArrayEvalEmpty(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
with self.assertRaisesOpError(
"TensorArray has size zero, but element shape <unknown> is not fully "
"defined. Currently only static shapes are supported when packing "
"zero-size TensorArrays."):
ta.stack().eval()
def testTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
self.assertEqual(0, ta.size().eval())
      # The unstack op itself is never run; unstacking a [0, 3, 5] tensor only
      # records the static element shape on the TensorArray.
ta.unstack(array_ops.zeros([0, 3, 5])).mark_used()
packed = ta.stack()
self.assertAllEqual([0, 3, 5], packed.eval().shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], ta.concat().eval().shape)
def testTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
def testTensorArrayScatterReadAndGradients(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
      # Test combined gradients of the two scattered reads
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testTensorArrayWriteGatherAndGradients(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
      # Test gradients flowing back through gather to the unstacked values
grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
g_vals, grad_vals = session.run([[g], grad])
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayGetsDeviceFromFirstWrite(self):
with ops.device("/gpu:1"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
# parent device was ignored when creating the TensorArray
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
with ops.device("/gpu:0"):
# the first write sets the op's device
ta = ta.write(0, 1.0)
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
with ops.device("/gpu:1"):
# subsequent writes do not modify the op's device
ta = ta.write(1, 1.0)
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
ta_grad = ta.grad("grad")
self.assertTrue("gpu:0" in ta_grad.handle.device.lower())
self.assertTrue("gpu:0" in ta_grad.flow.device.lower())
# Similar tests for unpack and split
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
with ops.device("/gpu:0"):
ta = ta.unstack([1.0, 2.0])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
with ops.device("/gpu:1"):
ta = ta.unstack([1.0, 2.0])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
with ops.device("/gpu:0"):
ta = ta.split([1.0, 2.0], [1, 1])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
with ops.device("/gpu:1"):
ta = ta.split([1.0, 2.0], [1, 1])
self.assertTrue("gpu:0" in ta.handle.device.lower())
self.assertTrue("gpu:0" in ta.flow.device.lower())
def testTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
def _body(i, ta_i):
with ops.device("/gpu:0"):
return i + 1, ta_i.write(i, 0.0)
self.assertEqual(ta.handle.device, "")
self.assertEqual(ta.flow.device, "")
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
self.assertTrue("gpu:0" in ta_out.handle.device.lower())
self.assertTrue("gpu:0" in ta.handle.device.lower())
def testTensorArrayLazyDeviceSettingDoesNotConfuseInitialAccess(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
self.assertEqual(ta.handle.device, "")
with ops.device("/cpu:0"):
size = ta.size()
with ops.device("/gpu:0"):
ta = ta.write(0, 0.0)
self.assertTrue("gpu:0" in ta.handle.device.lower())
# This should use the TensorArray on /gpu:0
size_value, _ = session.run((size, ta.flow))
self.assertEqual(2, size_value)
def testTensorArrayIdentity(self):
with self.test_session(use_gpu=True) as session:
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = variables.Variable(0)
v1 = variables.Variable(0)
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
variables.global_variables_initializer().run()
read0_v, read1_v, size0_v, size1_v = session.run(
(read0, read1, size0, size1))
      # Tests that the control dependencies were added and executed.
self.assertEqual(1, v0.eval())
self.assertEqual(1, v1.eval())
      # Tests that each read and size comes from the correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,521,932,278,056,882,700 | 35.469743 | 80 | 0.606849 | false |
ikeikeikeike/tastypie-queryset-client | docs/source/conf.py | 2 | 8389 | # -*- coding: utf-8 -*-
#
# Tastypie QuerySet Client documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 29 01:25:30 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# import os
from os.path import abspath, dirname, join
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tastypie QuerySet Client'
copyright = u'2012, Tatsuo Ikeda'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.2'
# The full version, including alpha/beta/rc tags.
release = '0.7.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TastypieQuerySetClientdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'TastypieQuerySetClient.tex', u'Tastypie QuerySet Client Documentation',
u'Tatsuo Ikeda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tastypiequerysetclient', u'Tastypie QuerySet Client Documentation',
[u'Tatsuo Ikeda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TastypieQuerySetClient', u'Tastypie QuerySet Client Documentation',
u'Tatsuo Ikeda', 'TastypieQuerySetClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# autodoc
keep_warnings = True
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
| mit | -2,716,296,258,463,638,500 | 32.158103 | 170 | 0.709381 | false |
mylons/incubator-airflow | airflow/__init__.py | 5 | 2610 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Authentication is implemented using flask_login and different environments can
implement their own login mechanisms by providing an `airflow_login` module
in their PYTHONPATH. airflow_login should be based on
`airflow.www.login`.
"""
from builtins import object
from airflow import version
__version__ = version.version
import logging
import os
import sys
from airflow import configuration as conf
from airflow.models import DAG
from flask_admin import BaseView
from importlib import import_module
from airflow.exceptions import AirflowException
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
login = None
def load_login():
auth_backend = 'airflow.default_login'
try:
if conf.getboolean('webserver', 'AUTHENTICATE'):
auth_backend = conf.get('webserver', 'auth_backend')
except conf.AirflowConfigException:
if conf.getboolean('webserver', 'AUTHENTICATE'):
logging.warning(
"auth_backend not found in webserver config reverting to "
"*deprecated* behavior of importing airflow_login")
auth_backend = "airflow_login"
try:
global login
login = import_module(auth_backend)
except ImportError as err:
logging.critical(
"Cannot import authentication module %s. "
"Please correct your authentication backend or disable authentication: %s",
auth_backend, err
)
if conf.getboolean('webserver', 'AUTHENTICATE'):
raise AirflowException("Failed to import authentication backend")
class AirflowViewPlugin(BaseView):
pass
class AirflowMacroPlugin(object):
def __init__(self, namespace):
self.namespace = namespace
from airflow import operators
from airflow import hooks
from airflow import executors
from airflow import macros
from airflow import contrib
operators._integrate_plugins()
hooks._integrate_plugins()
macros._integrate_plugins()
| apache-2.0 | -9,065,169,097,647,636,000 | 29.705882 | 87 | 0.718774 | false |
hefen1/chromium | PRESUBMIT_test.py | 2 | 30203 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import json
import os
import re
import subprocess
import sys
import unittest
import PRESUBMIT
from PRESUBMIT_test_mocks import MockChange, MockFile, MockAffectedFile
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi
_TEST_DATA_DIR = 'base/test/data/presubmit'
class IncludeOrderTest(unittest.TestCase):
def testSystemHeaderOrder(self):
scope = [(1, '#include <csystem.h>'),
(2, '#include <cppsystem>'),
(3, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(0, len(warnings))
def testSystemHeaderOrderMismatch1(self):
scope = [(10, '#include <cppsystem>'),
(20, '#include <csystem.h>'),
(30, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(1, len(warnings))
self.assertTrue('20' in warnings[0])
def testSystemHeaderOrderMismatch2(self):
scope = [(10, '#include <cppsystem>'),
(20, '#include "acustom.h"'),
(30, '#include <csystem.h>')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(1, len(warnings))
self.assertTrue('30' in warnings[0])
def testSystemHeaderOrderMismatch3(self):
scope = [(10, '#include "acustom.h"'),
(20, '#include <csystem.h>'),
(30, '#include <cppsystem>')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(2, len(warnings))
self.assertTrue('20' in warnings[0])
self.assertTrue('30' in warnings[1])
def testAlphabeticalOrderMismatch(self):
scope = [(10, '#include <csystem.h>'),
(15, '#include <bsystem.h>'),
(20, '#include <cppsystem>'),
(25, '#include <bppsystem>'),
(30, '#include "bcustom.h"'),
(35, '#include "acustom.h"')]
all_linenums = [linenum for (linenum, _) in scope]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', all_linenums)
self.assertEqual(3, len(warnings))
self.assertTrue('15' in warnings[0])
self.assertTrue('25' in warnings[1])
self.assertTrue('35' in warnings[2])
def testSpecialFirstInclude1(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude2(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude3(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_platform.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude4(self):
mock_input_api = MockInputApi()
contents = ['#include "some/path/bar.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_platform.cc', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
self.assertTrue('2' in warnings[0])
def testSpecialFirstInclude5(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo.h"',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo-suffix.h', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testSpecialFirstInclude6(self):
mock_input_api = MockInputApi()
contents = ['#include "some/other/path/foo_win.h"',
'#include <set>',
'#include "a/header.h"']
mock_file = MockFile('some/path/foo_unittest_win.h', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testOrderAlreadyWrong(self):
scope = [(1, '#include "b.h"'),
(2, '#include "a.h"'),
(3, '#include "c.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [3])
self.assertEqual(0, len(warnings))
def testConflictAdded1(self):
scope = [(1, '#include "a.h"'),
(2, '#include "c.h"'),
(3, '#include "b.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [2])
self.assertEqual(1, len(warnings))
self.assertTrue('3' in warnings[0])
def testConflictAdded2(self):
scope = [(1, '#include "c.h"'),
(2, '#include "b.h"'),
(3, '#include "d.h"')]
mock_input_api = MockInputApi()
warnings = PRESUBMIT._CheckIncludeOrderForScope(scope, mock_input_api,
'', [2])
self.assertEqual(1, len(warnings))
self.assertTrue('2' in warnings[0])
def testIfElifElseEndif(self):
mock_input_api = MockInputApi()
contents = ['#include "e.h"',
'#define foo',
'#include "f.h"',
'#undef foo',
'#include "e.h"',
'#if foo',
'#include "d.h"',
'#elif bar',
'#include "c.h"',
'#else',
'#include "b.h"',
'#endif',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testExcludedIncludes(self):
# #include <sys/...>'s can appear in any order.
mock_input_api = MockInputApi()
contents = ['#include <sys/b.h>',
'#include <sys/a.h>']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include <atlbase.h>',
'#include <aaa.h>']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
contents = ['#include "build/build_config.h"',
'#include "aaa.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(0, len(warnings))
def testCheckOnlyCFiles(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
contents = ['#include <b.h>',
'#include <a.h>']
mock_file_cc = MockFile('something.cc', contents)
mock_file_h = MockFile('something.h', contents)
mock_file_other = MockFile('something.py', contents)
mock_input_api.files = [mock_file_cc, mock_file_h, mock_file_other]
warnings = PRESUBMIT._CheckIncludeOrder(mock_input_api, mock_output_api)
self.assertEqual(1, len(warnings))
self.assertEqual(2, len(warnings[0].items))
self.assertEqual('promptOrNotify', warnings[0].type)
def testUncheckableIncludes(self):
mock_input_api = MockInputApi()
contents = ['#include <windows.h>',
'#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
contents = ['#include "gpu/command_buffer/gles_autogen.h"',
'#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
contents = ['#include "gl_mock_autogen.h"',
'#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
contents = ['#include "ipc/some_macros.h"',
'#include "b.h"',
'#include "a.h"']
mock_file = MockFile('', contents)
warnings = PRESUBMIT._CheckIncludeOrderInFile(
mock_input_api, mock_file, range(1, len(contents) + 1))
self.assertEqual(1, len(warnings))
class VersionControlConflictsTest(unittest.TestCase):
def testTypicalConflict(self):
lines = ['<<<<<<< HEAD',
' base::ScopedTempDir temp_dir_;',
'=======',
' ScopedTempDir temp_dir_;',
'>>>>>>> master']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(3, len(errors))
self.assertTrue('1' in errors[0])
self.assertTrue('3' in errors[1])
self.assertTrue('5' in errors[2])
class UmaHistogramChangeMatchedOrNotTest(unittest.TestCase):
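  # _CheckUmaHistogramChanges warns when a histogram name added in C++ has no
  # matching entry in histograms.xml, either directly or via an
  # <histogram_suffixes> expansion.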
def testTypicalCorrectlyMatchedChange(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram name="Bla.Foo.Dummy"> </histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testTypicalNotMatchedChange(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('some/path/foo.cc', diff_cc)]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
def testTypicalNotMatchedChangeViaSuffixes(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Snafu.Dummy"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
def testTypicalCorrectlyMatchedChangeViaSuffixes(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Bla.Foo"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testTypicalCorrectlyMatchedChangeViaSuffixesWithSeparator(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Snafu_Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram" separator="_">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Snafu"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
class BadExtensionsTest(unittest.TestCase):
def testBadRejFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', ''),
MockFile('some/path/foo.cc.rej', ''),
MockFile('some/path2/bar.h.rej', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(2, len(results[0].items))
self.assertTrue('foo.cc.rej' in results[0].items[0])
self.assertTrue('bar.h.rej' in results[0].items[1])
def testBadOrigFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h.orig', ''),
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(1, len(results[0].items))
self.assertTrue('qux.h.orig' in results[0].items[0])
def testGoodFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
def testOnlyOwnersFiles(self):
mock_change = MockChange([
'some/path/OWNERS',
'A\Windows\Path\OWNERS',
])
results = PRESUBMIT.GetPreferredTryMasters(None, mock_change)
self.assertEqual({}, results)
class CheckSingletonInHeadersTest(unittest.TestCase):
def testSingletonInArbitraryHeader(self):
diff_singleton_h = ['base::subtle::AtomicWord '
'Singleton<Type, Traits, DifferentiatingType>::']
diff_foo_h = ['// Singleton<Foo> in comment.',
'friend class Singleton<Foo>']
diff_bad_h = ['Foo* foo = Singleton<Foo>::get();']
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile('base/memory/singleton.h',
diff_singleton_h),
MockAffectedFile('foo.h', diff_foo_h),
MockAffectedFile('bad.h', diff_bad_h)]
warnings = PRESUBMIT._CheckSingletonInHeaders(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('error', warnings[0].type)
self.assertTrue('Found Singleton<T>' in warnings[0].message)
def testSingletonInCC(self):
diff_cc = ['Foo* foo = Singleton<Foo>::get();']
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile('some/path/foo.cc', diff_cc)]
warnings = PRESUBMIT._CheckSingletonInHeaders(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
class InvalidOSMacroNamesTest(unittest.TestCase):
def testInvalidOSMacroNames(self):
lines = ['#if defined(OS_WINDOWS)',
' #elif defined(OS_WINDOW)',
' # if defined(OS_MACOSX) || defined(OS_CHROME)',
'# else // defined(OS_MAC)',
'#endif // defined(OS_MACOS)']
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(len(lines), len(errors))
self.assertTrue(':1 OS_WINDOWS' in errors[0])
self.assertTrue('(did you mean OS_WIN?)' in errors[0])
def testValidOSMacroNames(self):
lines = ['#if defined(%s)' % m for m in PRESUBMIT._VALID_OS_MACROS]
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(0, len(errors))
class InvalidIfDefinedMacroNamesTest(unittest.TestCase):
def testInvalidIfDefinedMacroNames(self):
lines = ['#if defined(TARGET_IPHONE_SIMULATOR)',
'#if !defined(TARGET_IPHONE_SIMULATOR)',
'#elif defined(TARGET_IPHONE_SIMULATOR)',
'#ifdef TARGET_IPHONE_SIMULATOR',
' # ifdef TARGET_IPHONE_SIMULATOR',
'# if defined(VALID) || defined(TARGET_IPHONE_SIMULATOR)',
'# else // defined(TARGET_IPHONE_SIMULATOR)',
'#endif // defined(TARGET_IPHONE_SIMULATOR)',]
errors = PRESUBMIT._CheckForInvalidIfDefinedMacrosInFile(
MockInputApi(), MockFile('some/path/source.mm', lines))
self.assertEqual(len(lines), len(errors))
def testValidIfDefinedMacroNames(self):
lines = ['#if defined(FOO)',
'#ifdef BAR',]
errors = PRESUBMIT._CheckForInvalidIfDefinedMacrosInFile(
MockInputApi(), MockFile('some/path/source.cc', lines))
self.assertEqual(0, len(errors))
class CheckAddedDepsHaveTargetApprovalsTest(unittest.TestCase):
def testFilesToCheckForIncomingDeps(self):
changed_lines = [
'"+breakpad",',
'"+chrome/installer",',
'"+chrome/plugin/chrome_content_plugin_client.h",',
'"+chrome/utility/chrome_content_utility_client.h",',
'"+chromeos/chromeos_paths.h",',
'"+components/crash",',
'"+components/nacl/common",',
'"+content/public/browser/render_process_host.h",',
'"+jni/fooblat.h",',
'"+grit", # For generated headers',
'"+grit/generated_resources.h",',
'"+grit/",',
'"+policy", # For generated headers and source',
'"+sandbox",',
'"+tools/memory_watcher",',
'"+third_party/lss/linux_syscall_support.h",',
]
files_to_check = PRESUBMIT._FilesToCheckForIncomingDeps(re, changed_lines)
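    # As the expected set below shows, concrete paths map to themselves, bare
    # directories map to their DEPS file, and generated-header prefixes such
    # as grit/ and jni/ are skipped entirely.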
expected = set([
'breakpad/DEPS',
'chrome/installer/DEPS',
'chrome/plugin/chrome_content_plugin_client.h',
'chrome/utility/chrome_content_utility_client.h',
'chromeos/chromeos_paths.h',
'components/crash/DEPS',
'components/nacl/common/DEPS',
'content/public/browser/render_process_host.h',
'policy/DEPS',
'sandbox/DEPS',
'tools/memory_watcher/DEPS',
'third_party/lss/linux_syscall_support.h',
])
    self.assertEqual(expected, files_to_check)
class JSONParsingTest(unittest.TestCase):
def testSuccess(self):
input_api = MockInputApi()
filename = 'valid_json.json'
contents = ['// This is a comment.',
'{',
' "key1": ["value1", "value2"],',
' "key2": 3 // This is an inline comment.',
'}'
]
input_api.files = [MockFile(filename, contents)]
self.assertEqual(None,
PRESUBMIT._GetJSONParseError(input_api, filename))
def testFailure(self):
input_api = MockInputApi()
test_data = [
('invalid_json_1.json',
['{ x }'],
'Expecting property name:'),
('invalid_json_2.json',
['// Hello world!',
'{ "hello": "world }'],
'Unterminated string starting at:'),
('invalid_json_3.json',
['{ "a": "b", "c": "d", }'],
'Expecting property name:'),
('invalid_json_4.json',
['{ "a": "b" "c": "d" }'],
'Expecting , delimiter:'),
]
input_api.files = [MockFile(filename, contents)
for (filename, contents, _) in test_data]
for (filename, _, expected_error) in test_data:
actual_error = PRESUBMIT._GetJSONParseError(input_api, filename)
self.assertTrue(expected_error in str(actual_error),
"'%s' not found in '%s'" % (expected_error, actual_error))
def testNoEatComments(self):
input_api = MockInputApi()
file_with_comments = 'file_with_comments.json'
contents_with_comments = ['// This is a comment.',
'{',
' "key1": ["value1", "value2"],',
' "key2": 3 // This is an inline comment.',
'}'
]
file_without_comments = 'file_without_comments.json'
contents_without_comments = ['{',
' "key1": ["value1", "value2"],',
' "key2": 3',
'}'
]
input_api.files = [MockFile(file_with_comments, contents_with_comments),
MockFile(file_without_comments,
contents_without_comments)]
self.assertEqual('No JSON object could be decoded',
str(PRESUBMIT._GetJSONParseError(input_api,
file_with_comments,
eat_comments=False)))
self.assertEqual(None,
PRESUBMIT._GetJSONParseError(input_api,
file_without_comments,
eat_comments=False))
class IDLParsingTest(unittest.TestCase):
def testSuccess(self):
input_api = MockInputApi()
filename = 'valid_idl_basics.idl'
contents = ['// Tests a valid IDL file.',
'namespace idl_basics {',
' enum EnumType {',
' name1,',
' name2',
' };',
'',
' dictionary MyType1 {',
' DOMString a;',
' };',
'',
' callback Callback1 = void();',
' callback Callback2 = void(long x);',
' callback Callback3 = void(MyType1 arg);',
' callback Callback4 = void(EnumType type);',
'',
' interface Functions {',
' static void function1();',
' static void function2(long x);',
' static void function3(MyType1 arg);',
' static void function4(Callback1 cb);',
' static void function5(Callback2 cb);',
' static void function6(Callback3 cb);',
' static void function7(Callback4 cb);',
' };',
'',
' interface Events {',
' static void onFoo1();',
' static void onFoo2(long x);',
' static void onFoo2(MyType1 arg);',
' static void onFoo3(EnumType type);',
' };',
'};'
]
input_api.files = [MockFile(filename, contents)]
self.assertEqual(None,
PRESUBMIT._GetIDLParseError(input_api, filename))
def testFailure(self):
input_api = MockInputApi()
test_data = [
('invalid_idl_1.idl',
['//',
'namespace test {',
' dictionary {',
' DOMString s;',
' };',
'};'],
'Unexpected "{" after keyword "dictionary".\n'),
# TODO(yoz): Disabled because it causes the IDL parser to hang.
# See crbug.com/363830.
# ('invalid_idl_2.idl',
# (['namespace test {',
# ' dictionary MissingSemicolon {',
# ' DOMString a',
# ' DOMString b;',
# ' };',
# '};'],
# 'Unexpected symbol DOMString after symbol a.'),
('invalid_idl_3.idl',
['//',
'namespace test {',
' enum MissingComma {',
' name1',
' name2',
' };',
'};'],
'Unexpected symbol name2 after symbol name1.'),
('invalid_idl_4.idl',
['//',
'namespace test {',
' enum TrailingComma {',
' name1,',
' name2,',
' };',
'};'],
'Trailing comma in block.'),
('invalid_idl_5.idl',
['//',
'namespace test {',
' callback Callback1 = void(;',
'};'],
'Unexpected ";" after "(".'),
('invalid_idl_6.idl',
['//',
'namespace test {',
' callback Callback1 = void(long );',
'};'],
'Unexpected ")" after symbol long.'),
('invalid_idl_7.idl',
['//',
'namespace test {',
' interace Events {',
' static void onFoo1();',
' };',
'};'],
'Unexpected symbol Events after symbol interace.'),
('invalid_idl_8.idl',
['//',
'namespace test {',
' interface NotEvent {',
' static void onFoo1();',
' };',
'};'],
'Did not process Interface Interface(NotEvent)'),
('invalid_idl_9.idl',
['//',
'namespace test {',
' interface {',
' static void function1();',
' };',
'};'],
'Interface missing name.'),
]
input_api.files = [MockFile(filename, contents)
for (filename, contents, _) in test_data]
for (filename, _, expected_error) in test_data:
actual_error = PRESUBMIT._GetIDLParseError(input_api, filename)
self.assertTrue(expected_error in str(actual_error),
"'%s' not found in '%s'" % (expected_error, actual_error))
class TryServerMasterTest(unittest.TestCase):
def testTryServerMasters(self):
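    # Every bot listed under a master below must be mapped back to that same
    # master by PRESUBMIT.GetTryServerMasterForBot.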
bots = {
'tryserver.chromium.mac': [
'ios_dbg_simulator',
'ios_rel_device',
'ios_rel_device_ninja',
'mac_asan',
'mac_asan_64',
'mac_chromium_compile_dbg',
'mac_chromium_compile_rel',
'mac_chromium_dbg',
'mac_chromium_rel',
'mac_nacl_sdk',
'mac_nacl_sdk_build',
'mac_rel_naclmore',
'mac_valgrind',
'mac_x64_rel',
'mac_xcodebuild',
],
'tryserver.chromium.linux': [
'android_aosp',
'android_chromium_gn_compile_dbg',
'android_chromium_gn_compile_rel',
'android_clang_dbg',
'android_dbg',
'android_dbg_recipe',
'android_dbg_triggered_tests',
'android_dbg_triggered_tests_recipe',
'android_fyi_dbg',
'android_fyi_dbg_triggered_tests',
'android_rel',
'android_rel_triggered_tests',
'android_x86_dbg',
'blink_android_compile_dbg',
'blink_android_compile_rel',
'blink_presubmit',
'chromium_presubmit',
'linux_arm_cross_compile',
'linux_arm_tester',
'linux_chromeos_asan',
'linux_chromeos_browser_asan',
'linux_chromeos_valgrind',
'linux_chromium_chromeos_dbg',
'linux_chromium_chromeos_rel',
'linux_chromium_compile_dbg',
'linux_chromium_compile_rel',
'linux_chromium_dbg',
'linux_chromium_gn_dbg',
'linux_chromium_gn_rel',
'linux_chromium_rel',
'linux_chromium_trusty32_dbg',
'linux_chromium_trusty32_rel',
'linux_chromium_trusty_dbg',
'linux_chromium_trusty_rel',
'linux_clang_tsan',
'linux_ecs_ozone',
'linux_layout',
'linux_layout_asan',
'linux_layout_rel',
'linux_layout_rel_32',
'linux_nacl_sdk',
'linux_nacl_sdk_bionic',
'linux_nacl_sdk_bionic_build',
'linux_nacl_sdk_build',
'linux_redux',
'linux_rel_naclmore',
'linux_rel_precise32',
'linux_valgrind',
'tools_build_presubmit',
],
'tryserver.chromium.win': [
'win8_aura',
'win8_chromium_dbg',
'win8_chromium_rel',
'win_chromium_compile_dbg',
'win_chromium_compile_rel',
'win_chromium_dbg',
'win_chromium_rel',
'win_chromium_rel',
'win_chromium_x64_dbg',
'win_chromium_x64_rel',
'win_drmemory',
'win_nacl_sdk',
'win_nacl_sdk_build',
'win_rel_naclmore',
],
}
for master, bots in bots.iteritems():
for bot in bots:
self.assertEqual(master, PRESUBMIT.GetTryServerMasterForBot(bot),
'bot=%s: expected %s, computed %s' % (
bot, master, PRESUBMIT.GetTryServerMasterForBot(bot)))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 4,928,798,008,609,115,000 | 37.377382 | 80 | 0.55521 | false |
NINAnor/QGIS | python/plugins/GdalTools/tools/doSieve.py | 12 | 4153 | # -*- coding: utf-8 -*-
"""
***************************************************************************
doSieve.py
---------------------
Date : June 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'June 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QWidget
from ui_widgetSieve import Ui_GdalToolsWidget as Ui_Widget
from widgetPluginBase import GdalToolsBasePluginWidget as BasePluginWidget
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BasePluginWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.setupUi(self)
BasePluginWidget.__init__(self, self.iface, "gdal_sieve.py")
self.outSelector.setType(self.outSelector.FILE)
self.outputFormat = Utils.fillRasterOutputFormat()
self.setParamsStatus([
(self.inSelector, SIGNAL("filenameChanged()")),
(self.outSelector, SIGNAL("filenameChanged()")),
(self.thresholdSpin, SIGNAL("valueChanged(int)"), self.thresholdCheck),
(self.connectionsCombo, SIGNAL("currentIndexChanged(int)"), self.connectionsCheck)
])
self.connect(self.inSelector, SIGNAL("selectClicked()"), self.fillInputFileEdit)
self.connect(self.outSelector, SIGNAL("selectClicked()"), self.fillOutputFileEdit)
def onLayersChanged(self):
self.inSelector.setLayers(Utils.LayerRegistry.instance().getRasterLayers())
def fillInputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
inputFile = Utils.FileDialog.getOpenFileName(self, self.tr("Select the input file for Sieve"), Utils.FileFilter.allRastersFilter(), lastUsedFilter)
if not inputFile:
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.inSelector.setFilename(inputFile)
def fillOutputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
outputFile = Utils.FileDialog.getSaveFileName(self, self.tr("Select the raster file to save the results to"), Utils.FileFilter.saveRastersFilter(), lastUsedFilter)
if not outputFile:
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.outputFormat = Utils.fillRasterOutputFormat(lastUsedFilter, outputFile)
self.outSelector.setFilename(outputFile)
def getArguments(self):
arguments = []
if self.thresholdCheck.isChecked():
arguments.append("-st")
arguments.append(unicode(self.thresholdSpin.value()))
if self.connectionsCheck.isChecked() and self.connectionsCombo.currentIndex() >= 0:
arguments.append("-" + self.connectionsCombo.currentText())
outputFn = self.getOutputFileName()
if outputFn:
arguments.append("-of")
arguments.append(self.outputFormat)
arguments.append(self.getInputFileName())
arguments.append(outputFn)
return arguments
def getOutputFileName(self):
return self.outSelector.filename()
def getInputFileName(self):
return self.inSelector.filename()
def addLayerIntoCanvas(self, fileInfo):
self.iface.addRasterLayer(fileInfo.filePath())
| gpl-2.0 | 4,575,895,126,424,551,000 | 40.53 | 171 | 0.610643 | false |
azumimuo/family-xbmc-addon | plugin.video.dragon.sports/lib/utils/github/AuthenticatedUser.py | 7 | 27215 | # -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# [email protected]
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import PaginatedList
import InputFileContent
import Gist
import Repository
import NamedUser
import Plan
import Organization
import UserKey
import Issue
import Event
import Authorization
class AuthenticatedUser(GithubObject.GithubObject):
@property
def avatar_url(self):
self._completeIfNotSet(self._avatar_url)
return self._NoneIfNotSet(self._avatar_url)
@property
def bio(self):
self._completeIfNotSet(self._bio)
return self._NoneIfNotSet(self._bio)
@property
def blog(self):
self._completeIfNotSet(self._blog)
return self._NoneIfNotSet(self._blog)
@property
def collaborators(self):
self._completeIfNotSet(self._collaborators)
return self._NoneIfNotSet(self._collaborators)
@property
def company(self):
self._completeIfNotSet(self._company)
return self._NoneIfNotSet(self._company)
@property
def created_at(self):
self._completeIfNotSet(self._created_at)
return self._NoneIfNotSet(self._created_at)
@property
def disk_usage(self):
self._completeIfNotSet(self._disk_usage)
return self._NoneIfNotSet(self._disk_usage)
@property
def email(self):
self._completeIfNotSet(self._email)
return self._NoneIfNotSet(self._email)
@property
def followers(self):
self._completeIfNotSet(self._followers)
return self._NoneIfNotSet(self._followers)
@property
def following(self):
self._completeIfNotSet(self._following)
return self._NoneIfNotSet(self._following)
@property
def gravatar_id(self):
self._completeIfNotSet(self._gravatar_id)
return self._NoneIfNotSet(self._gravatar_id)
@property
def hireable(self):
self._completeIfNotSet(self._hireable)
return self._NoneIfNotSet(self._hireable)
@property
def html_url(self):
self._completeIfNotSet(self._html_url)
return self._NoneIfNotSet(self._html_url)
@property
def id(self):
self._completeIfNotSet(self._id)
return self._NoneIfNotSet(self._id)
@property
def location(self):
self._completeIfNotSet(self._location)
return self._NoneIfNotSet(self._location)
@property
def login(self):
self._completeIfNotSet(self._login)
return self._NoneIfNotSet(self._login)
@property
def name(self):
self._completeIfNotSet(self._name)
return self._NoneIfNotSet(self._name)
@property
def owned_private_repos(self):
self._completeIfNotSet(self._owned_private_repos)
return self._NoneIfNotSet(self._owned_private_repos)
@property
def plan(self):
self._completeIfNotSet(self._plan)
return self._NoneIfNotSet(self._plan)
@property
def private_gists(self):
self._completeIfNotSet(self._private_gists)
return self._NoneIfNotSet(self._private_gists)
@property
def public_gists(self):
self._completeIfNotSet(self._public_gists)
return self._NoneIfNotSet(self._public_gists)
@property
def public_repos(self):
self._completeIfNotSet(self._public_repos)
return self._NoneIfNotSet(self._public_repos)
@property
def total_private_repos(self):
self._completeIfNotSet(self._total_private_repos)
return self._NoneIfNotSet(self._total_private_repos)
@property
def type(self):
self._completeIfNotSet(self._type)
return self._NoneIfNotSet(self._type)
@property
def url(self):
self._completeIfNotSet(self._url)
return self._NoneIfNotSet(self._url)
def add_to_emails(self, *emails):
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestAndCheck(
"POST",
"/user/emails",
None,
post_parameters
)
def add_to_following(self, following):
assert isinstance(following, NamedUser.NamedUser), following
headers, data = self._requester.requestAndCheck(
"PUT",
"/user/following/" + following._identity,
None,
None
)
def add_to_starred(self, starred):
assert isinstance(starred, Repository.Repository), starred
headers, data = self._requester.requestAndCheck(
"PUT",
"/user/starred/" + starred._identity,
None,
None
)
def add_to_subscriptions(self, subscription):
assert isinstance(subscription, Repository.Repository), subscription
headers, data = self._requester.requestAndCheck(
"PUT",
"/user/subscriptions/" + subscription._identity,
None,
None
)
def add_to_watched(self, watched):
assert isinstance(watched, Repository.Repository), watched
headers, data = self._requester.requestAndCheck(
"PUT",
"/user/watched/" + watched._identity,
None,
None
)
def create_authorization(self, scopes=GithubObject.NotSet, note=GithubObject.NotSet, note_url=GithubObject.NotSet):
assert scopes is GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in scopes), scopes
assert note is GithubObject.NotSet or isinstance(note, (str, unicode)), note
assert note_url is GithubObject.NotSet or isinstance(note_url, (str, unicode)), note_url
post_parameters = dict()
if scopes is not GithubObject.NotSet:
post_parameters["scopes"] = scopes
if note is not GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not GithubObject.NotSet:
post_parameters["note_url"] = note_url
headers, data = self._requester.requestAndCheck(
"POST",
"/authorizations",
None,
post_parameters
)
return Authorization.Authorization(self._requester, data, completed=True)
def create_fork(self, repo):
assert isinstance(repo, Repository.Repository), repo
headers, data = self._requester.requestAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
None,
None
)
return Repository.Repository(self._requester, data, completed=True)
def create_gist(self, public, files, description=GithubObject.NotSet):
assert isinstance(public, bool), public
assert all(isinstance(element, InputFileContent.InputFileContent) for element in files.itervalues()), files
assert description is GithubObject.NotSet or isinstance(description, (str, unicode)), description
post_parameters = {
"public": public,
"files": dict((key, value._identity) for key, value in files.iteritems()),
}
if description is not GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestAndCheck(
"POST",
"/gists",
None,
post_parameters
)
return Gist.Gist(self._requester, data, completed=True)
def create_key(self, title, key):
assert isinstance(title, (str, unicode)), title
assert isinstance(key, (str, unicode)), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestAndCheck(
"POST",
"/user/keys",
None,
post_parameters
)
return UserKey.UserKey(self._requester, data, completed=True)
def create_repo(self, name, description=GithubObject.NotSet, homepage=GithubObject.NotSet, private=GithubObject.NotSet, has_issues=GithubObject.NotSet, has_wiki=GithubObject.NotSet, has_downloads=GithubObject.NotSet, auto_init=GithubObject.NotSet, gitignore_template=GithubObject.NotSet):
assert isinstance(name, (str, unicode)), name
assert description is GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert auto_init is GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert gitignore_template is GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
post_parameters = {
"name": name,
}
if description is not GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if auto_init is not GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if gitignore_template is not GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
headers, data = self._requester.requestAndCheck(
"POST",
"/user/repos",
None,
post_parameters
)
return Repository.Repository(self._requester, data, completed=True)
def edit(self, name=GithubObject.NotSet, email=GithubObject.NotSet, blog=GithubObject.NotSet, company=GithubObject.NotSet, location=GithubObject.NotSet, hireable=GithubObject.NotSet, bio=GithubObject.NotSet):
assert name is GithubObject.NotSet or isinstance(name, (str, unicode)), name
assert email is GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert blog is GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert location is GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert hireable is GithubObject.NotSet or isinstance(hireable, bool), hireable
assert bio is GithubObject.NotSet or isinstance(bio, (str, unicode)), bio
post_parameters = dict()
if name is not GithubObject.NotSet:
post_parameters["name"] = name
if email is not GithubObject.NotSet:
post_parameters["email"] = email
if blog is not GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not GithubObject.NotSet:
post_parameters["company"] = company
if location is not GithubObject.NotSet:
post_parameters["location"] = location
if hireable is not GithubObject.NotSet:
post_parameters["hireable"] = hireable
if bio is not GithubObject.NotSet:
post_parameters["bio"] = bio
headers, data = self._requester.requestAndCheck(
"PATCH",
"/user",
None,
post_parameters
)
self._useAttributes(data)
def get_authorization(self, id):
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestAndCheck(
"GET",
"/authorizations/" + str(id),
None,
None
)
return Authorization.Authorization(self._requester, data, completed=True)
def get_authorizations(self):
return PaginatedList.PaginatedList(
Authorization.Authorization,
self._requester,
"/authorizations",
None
)
def get_emails(self):
headers, data = self._requester.requestAndCheck(
"GET",
"/user/emails",
None,
None
)
return data
def get_events(self):
return PaginatedList.PaginatedList(
Event.Event,
self._requester,
"/events",
None
)
def get_followers(self):
return PaginatedList.PaginatedList(
NamedUser.NamedUser,
self._requester,
"/user/followers",
None
)
def get_following(self):
return PaginatedList.PaginatedList(
NamedUser.NamedUser,
self._requester,
"/user/following",
None
)
def get_gists(self):
return PaginatedList.PaginatedList(
Gist.Gist,
self._requester,
"/gists",
None
)
def get_issues(self):
return PaginatedList.PaginatedList(
Issue.Issue,
self._requester,
"/issues",
None
)
def get_key(self, id):
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestAndCheck(
"GET",
"/user/keys/" + str(id),
None,
None
)
return UserKey.UserKey(self._requester, data, completed=True)
def get_keys(self):
return PaginatedList.PaginatedList(
UserKey.UserKey,
self._requester,
"/user/keys",
None
)
def get_organization_events(self, org):
assert isinstance(org, Organization.Organization), org
return PaginatedList.PaginatedList(
Event.Event,
self._requester,
"/users/" + self.login + "/events/orgs/" + org.login,
None
)
def get_orgs(self):
return PaginatedList.PaginatedList(
Organization.Organization,
self._requester,
"/user/orgs",
None
)
def get_repo(self, name):
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestAndCheck(
"GET",
"/repos/" + self.login + "/" + name,
None,
None
)
return Repository.Repository(self._requester, data, completed=True)
def get_repos(self, type=GithubObject.NotSet, sort=GithubObject.NotSet, direction=GithubObject.NotSet):
assert type is GithubObject.NotSet or isinstance(type, (str, unicode)), type
assert sort is GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if type is not GithubObject.NotSet:
url_parameters["type"] = type
if sort is not GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not GithubObject.NotSet:
url_parameters["direction"] = direction
return PaginatedList.PaginatedList(
Repository.Repository,
self._requester,
"/user/repos",
url_parameters
)
def get_starred(self):
return PaginatedList.PaginatedList(
Repository.Repository,
self._requester,
"/user/starred",
None
)
def get_starred_gists(self):
return PaginatedList.PaginatedList(
Gist.Gist,
self._requester,
"/gists/starred",
None
)
def get_subscriptions(self):
return PaginatedList.PaginatedList(
Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def get_watched(self):
return PaginatedList.PaginatedList(
Repository.Repository,
self._requester,
"/user/watched",
None
)
def has_in_following(self, following):
assert isinstance(following, NamedUser.NamedUser), following
status, headers, data = self._requester.requestRaw(
"GET",
"/user/following/" + following._identity,
None,
None
)
return status == 204
def has_in_starred(self, starred):
assert isinstance(starred, Repository.Repository), starred
status, headers, data = self._requester.requestRaw(
"GET",
"/user/starred/" + starred._identity,
None,
None
)
return status == 204
def has_in_subscriptions(self, subscription):
assert isinstance(subscription, Repository.Repository), subscription
status, headers, data = self._requester.requestRaw(
"GET",
"/user/subscriptions/" + subscription._identity,
None,
None
)
return status == 204
def has_in_watched(self, watched):
assert isinstance(watched, Repository.Repository), watched
status, headers, data = self._requester.requestRaw(
"GET",
"/user/watched/" + watched._identity,
None,
None
)
return status == 204
def remove_from_emails(self, *emails):
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestAndCheck(
"DELETE",
"/user/emails",
None,
post_parameters
)
def remove_from_following(self, following):
assert isinstance(following, NamedUser.NamedUser), following
headers, data = self._requester.requestAndCheck(
"DELETE",
"/user/following/" + following._identity,
None,
None
)
def remove_from_starred(self, starred):
assert isinstance(starred, Repository.Repository), starred
headers, data = self._requester.requestAndCheck(
"DELETE",
"/user/starred/" + starred._identity,
None,
None
)
def remove_from_subscriptions(self, subscription):
assert isinstance(subscription, Repository.Repository), subscription
headers, data = self._requester.requestAndCheck(
"DELETE",
"/user/subscriptions/" + subscription._identity,
None,
None
)
def remove_from_watched(self, watched):
assert isinstance(watched, Repository.Repository), watched
headers, data = self._requester.requestAndCheck(
"DELETE",
"/user/watched/" + watched._identity,
None,
None
)
def _initAttributes(self):
self._avatar_url = GithubObject.NotSet
self._bio = GithubObject.NotSet
self._blog = GithubObject.NotSet
self._collaborators = GithubObject.NotSet
self._company = GithubObject.NotSet
self._created_at = GithubObject.NotSet
self._disk_usage = GithubObject.NotSet
self._email = GithubObject.NotSet
self._followers = GithubObject.NotSet
self._following = GithubObject.NotSet
self._gravatar_id = GithubObject.NotSet
self._hireable = GithubObject.NotSet
self._html_url = GithubObject.NotSet
self._id = GithubObject.NotSet
self._location = GithubObject.NotSet
self._login = GithubObject.NotSet
self._name = GithubObject.NotSet
self._owned_private_repos = GithubObject.NotSet
self._plan = GithubObject.NotSet
self._private_gists = GithubObject.NotSet
self._public_gists = GithubObject.NotSet
self._public_repos = GithubObject.NotSet
self._total_private_repos = GithubObject.NotSet
self._type = GithubObject.NotSet
self._url = GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
assert attributes["avatar_url"] is None or isinstance(attributes["avatar_url"], (str, unicode)), attributes["avatar_url"]
self._avatar_url = attributes["avatar_url"]
if "bio" in attributes: # pragma no branch
assert attributes["bio"] is None or isinstance(attributes["bio"], (str, unicode)), attributes["bio"]
self._bio = attributes["bio"]
if "blog" in attributes: # pragma no branch
assert attributes["blog"] is None or isinstance(attributes["blog"], (str, unicode)), attributes["blog"]
self._blog = attributes["blog"]
if "collaborators" in attributes: # pragma no branch
assert attributes["collaborators"] is None or isinstance(attributes["collaborators"], (int, long)), attributes["collaborators"]
self._collaborators = attributes["collaborators"]
if "company" in attributes: # pragma no branch
assert attributes["company"] is None or isinstance(attributes["company"], (str, unicode)), attributes["company"]
self._company = attributes["company"]
if "created_at" in attributes: # pragma no branch
assert attributes["created_at"] is None or isinstance(attributes["created_at"], (str, unicode)), attributes["created_at"]
self._created_at = self._parseDatetime(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
assert attributes["disk_usage"] is None or isinstance(attributes["disk_usage"], (int, long)), attributes["disk_usage"]
self._disk_usage = attributes["disk_usage"]
if "email" in attributes: # pragma no branch
assert attributes["email"] is None or isinstance(attributes["email"], (str, unicode)), attributes["email"]
self._email = attributes["email"]
if "followers" in attributes: # pragma no branch
assert attributes["followers"] is None or isinstance(attributes["followers"], (int, long)), attributes["followers"]
self._followers = attributes["followers"]
if "following" in attributes: # pragma no branch
assert attributes["following"] is None or isinstance(attributes["following"], (int, long)), attributes["following"]
self._following = attributes["following"]
if "gravatar_id" in attributes: # pragma no branch
assert attributes["gravatar_id"] is None or isinstance(attributes["gravatar_id"], (str, unicode)), attributes["gravatar_id"]
self._gravatar_id = attributes["gravatar_id"]
if "hireable" in attributes: # pragma no branch
assert attributes["hireable"] is None or isinstance(attributes["hireable"], bool), attributes["hireable"]
self._hireable = attributes["hireable"]
if "html_url" in attributes: # pragma no branch
assert attributes["html_url"] is None or isinstance(attributes["html_url"], (str, unicode)), attributes["html_url"]
self._html_url = attributes["html_url"]
if "id" in attributes: # pragma no branch
assert attributes["id"] is None or isinstance(attributes["id"], (int, long)), attributes["id"]
self._id = attributes["id"]
if "location" in attributes: # pragma no branch
assert attributes["location"] is None or isinstance(attributes["location"], (str, unicode)), attributes["location"]
self._location = attributes["location"]
if "login" in attributes: # pragma no branch
assert attributes["login"] is None or isinstance(attributes["login"], (str, unicode)), attributes["login"]
self._login = attributes["login"]
if "name" in attributes: # pragma no branch
assert attributes["name"] is None or isinstance(attributes["name"], (str, unicode)), attributes["name"]
self._name = attributes["name"]
if "owned_private_repos" in attributes: # pragma no branch
assert attributes["owned_private_repos"] is None or isinstance(attributes["owned_private_repos"], (int, long)), attributes["owned_private_repos"]
self._owned_private_repos = attributes["owned_private_repos"]
if "plan" in attributes: # pragma no branch
assert attributes["plan"] is None or isinstance(attributes["plan"], dict), attributes["plan"]
self._plan = None if attributes["plan"] is None else Plan.Plan(self._requester, attributes["plan"], completed=False)
if "private_gists" in attributes: # pragma no branch
assert attributes["private_gists"] is None or isinstance(attributes["private_gists"], (int, long)), attributes["private_gists"]
self._private_gists = attributes["private_gists"]
if "public_gists" in attributes: # pragma no branch
assert attributes["public_gists"] is None or isinstance(attributes["public_gists"], (int, long)), attributes["public_gists"]
self._public_gists = attributes["public_gists"]
if "public_repos" in attributes: # pragma no branch
assert attributes["public_repos"] is None or isinstance(attributes["public_repos"], (int, long)), attributes["public_repos"]
self._public_repos = attributes["public_repos"]
if "total_private_repos" in attributes: # pragma no branch
assert attributes["total_private_repos"] is None or isinstance(attributes["total_private_repos"], (int, long)), attributes["total_private_repos"]
self._total_private_repos = attributes["total_private_repos"]
if "type" in attributes: # pragma no branch
assert attributes["type"] is None or isinstance(attributes["type"], (str, unicode)), attributes["type"]
self._type = attributes["type"]
if "url" in attributes: # pragma no branch
assert attributes["url"] is None or isinstance(attributes["url"], (str, unicode)), attributes["url"]
self._url = attributes["url"]
| gpl-2.0 | -1,894,709,387,845,097,700 | 38.846266 | 292 | 0.620393 | false |
xenserver/xscontainer | src/xscontainer/docker_monitor/api.py | 3 | 1360 | """
API Entry points for interacting with the DockerMonitor service.
"""
from xscontainer import docker
from xscontainer import docker_monitor
from xscontainer.api_helper import VM
from xscontainer.api_helper import XenAPIClient
from xscontainer.util import log
def register_vm(vm_uuid, session):
log.info("register_vm %s" % (vm_uuid))
client = XenAPIClient(session)
thevm = VM(client, uuid=vm_uuid)
thevm.update_other_config(docker_monitor.REGISTRATION_KEY,
docker_monitor.REGISTRATION_KEY_ON)
return
def deregister_vm(vm_uuid, session):
log.info("deregister_vm %s" % (vm_uuid))
client = XenAPIClient(session)
thevm = VM(client, uuid=vm_uuid)
thevm.update_other_config(docker_monitor.REGISTRATION_KEY,
docker_monitor.REGISTRATION_KEY_OFF)
docker.wipe_docker_other_config(thevm)
return
def mark_monitorable_vm(vm_uuid, session):
""" Ensure the VM has a REGISTRATION_KEY in vm:other_config. This key is
used by XC to know whether monitoring is an option for this VM """
log.info("mark_monitorable_vm %s" % (vm_uuid))
client = XenAPIClient(session)
thevm = VM(client, uuid=vm_uuid)
other_config = thevm.get_other_config()
if (docker_monitor.REGISTRATION_KEY not in other_config):
deregister_vm(vm_uuid, session)
| bsd-2-clause | 65,622,347,360,845,940 | 34.789474 | 76 | 0.691176 | false |
Neohapsis/mptcp-abuse | tests/join/none-before-subflow-4th-ack.py | 1 | 1997 | #!/usr/bin/env python2
# Client part of the scenario
from tests.mptcptestlib import *
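# Scenario: open an MPTCP connection on one subflow, answer a JOIN on a second
# subflow but withhold the final (4th) ACK of the join handshake; data sent by
# the server on the new subflow should be dropped until that ACK goes out, and
# should pass afterwards.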
def main():
conf = {"printanswer":False, "debug":4, "check": False}
t = ProtoTester(conf)
s = MPTCPState()
m = MPTCPTest(tester=t, initstate=s)
# Client IPs
A1 = "10.1.1.2"
A2 = "10.1.2.2"
# Server IP
B = "10.2.1.2"
t.toggleKernelHandling(enable=False)
try:
sub1 = s.registerNewSubflow(dst=B, src=A1)
conn_open = [m.CapSYN, m.Wait, m.CapACK]
t.sendSequence(conn_open, initstate=s, sub=sub1)
join_accept = [m.Wait, m.JoinSYNACK, m.Wait] # m.ACK] # omit final ACK
t.sendSequence(join_accept, initstate=s)
# synchronization after Wait
t.syncReady(dst=B)
        # The server should send data. Does the firewall drop it?
dataisDropped = False
try:
t.sendpkt(m.Wait, timeout=1)
except PktWaitTimeOutException as e:
print "No packet in last %i seconds"%e.timeval
dataisDropped=True
# Finally send 4th (JOIN) ACK
t.sendpkt(m.ACK)
        # now the data shouldn't be dropped anymore
data2isDropped = False
try:
t.sendpkt(m.Wait, timeout=1)
except PktWaitTimeOutException as e:
print "No packet in last %i seconds"%e.timeval
data2isDropped=True
data_fin_init = [m.DSSFIN, m.Wait, m.DSSACK]
t.sendSequence(data_fin_init, sub=sub1)
t.syncWait()
sub2 = s.getSubflow(1)
# assuming that the remote host uses a single FINACK packet
fin_init1 = [m.FIN, m.Wait, m.ACK]
t.sendSequence(fin_init1, sub=sub1)
t.syncWait()
fin_init2 = [m.FIN, m.Wait, m.ACK]
t.sendSequence(fin_init2, sub=sub2)
finally:
t.toggleKernelHandling(enable=True)
import sys
sys.exit(int(not (dataisDropped and not data2isDropped)))
if __name__ == "__main__":
main()
# vim: set ts=4 sts=4 sw=4 et:
| gpl-2.0 | 2,926,188,541,421,442,600 | 27.126761 | 78 | 0.590886 | false |
ferewuz/libcloud | libcloud/storage/drivers/google_storage.py | 28 | 5031 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import hmac
from email.utils import formatdate
from hashlib import sha1
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey
from libcloud.storage.drivers.s3 import BaseS3StorageDriver, S3Response
from libcloud.storage.drivers.s3 import S3RawResponse
SIGNATURE_IDENTIFIER = 'GOOG1'
# Docs are a lie. The actual namespace returned is different from the one listed in
# the docs.
AUTH_HOST = 'commondatastorage.googleapis.com'
API_VERSION = '2006-03-01'
NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION)
class GoogleStorageConnection(ConnectionUserAndKey):
"""
    Represents a single connection to the Google storage API endpoint.
"""
host = AUTH_HOST
responseCls = S3Response
rawResponseCls = S3RawResponse
def add_default_headers(self, headers):
date = formatdate(usegmt=True)
headers['Date'] = date
return headers
def pre_connect_hook(self, params, headers):
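        # Sign the request and add the interoperable-style Authorization
        # header: 'GOOG1 <access key>:<base64 HMAC-SHA1 signature>'.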
signature = self._get_aws_auth_param(method=self.method,
headers=headers,
params=params,
expires=None,
secret_key=self.key,
path=self.action)
headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER,
self.user_id, signature)
return params, headers
def _get_aws_auth_param(self, method, headers, params, expires,
secret_key, path='/'):
# TODO: Refactor and re-use in S3 driver
"""
Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
UTF-8-Encoding-Of( StringToSign ) ) ) );
StringToSign = HTTP-VERB + "\n" +
Content-MD5 + "\n" +
Content-Type + "\n" +
Date + "\n" +
CanonicalizedHeaders +
CanonicalizedResource;
"""
special_header_keys = ['content-md5', 'content-type', 'date']
special_header_values = {}
extension_header_values = {}
headers_copy = copy.deepcopy(headers)
for key, value in list(headers_copy.items()):
if key.lower() in special_header_keys:
if key.lower() == 'date':
value = value.strip()
else:
value = value.lower().strip()
special_header_values[key.lower()] = value
elif key.lower().startswith('x-goog-'):
extension_header_values[key.lower()] = value.strip()
if 'content-md5' not in special_header_values:
special_header_values['content-md5'] = ''
if 'content-type' not in special_header_values:
special_header_values['content-type'] = ''
keys_sorted = list(special_header_values.keys())
keys_sorted.sort()
buf = [method]
for key in keys_sorted:
value = special_header_values[key]
buf.append(value)
string_to_sign = '\n'.join(buf)
keys_sorted = list(extension_header_values.keys())
keys_sorted.sort()
extension_header_string = []
for key in keys_sorted:
value = extension_header_values[key]
extension_header_string.append('%s:%s' % (key, value))
extension_header_string = '\n'.join(extension_header_string)
values_to_sign = []
for value in [string_to_sign, extension_header_string, path]:
if value:
values_to_sign.append(value)
string_to_sign = '\n'.join(values_to_sign)
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
)
return b64_hmac.decode('utf-8')
class GoogleStorageDriver(BaseS3StorageDriver):
name = 'Google Storage'
website = 'http://cloud.google.com/'
connectionCls = GoogleStorageConnection
hash_type = 'md5'
namespace = NAMESPACE
supports_chunked_encoding = False
supports_s3_multipart_upload = False
http_vendor_prefix = 'x-goog'
| apache-2.0 | -813,955,614,884,171,400 | 35.722628 | 79 | 0.604254 | false |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/contrib/gis/db/backends/oracle/schema.py | 102 | 3916 | from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import strip_quotes, truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
sql_add_geometry_metadata = ("""
INSERT INTO USER_SDO_GEOM_METADATA
("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
VALUES (
%(table)s,
%(column)s,
MDSYS.SDO_DIM_ARRAY(
MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
),
%(srid)s
)""")
sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
sql_drop_spatial_index = 'DROP INDEX %(index)s'
sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
sql_clear_geometry_field_metadata = (
'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
'AND COLUMN_NAME = %(column)s'
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.geometry_sql = []
def geo_quote_name(self, name):
return self.connection.ops.geo_quote_name(name)
def column_sql(self, model, field, include_default=False):
column_sql = super().column_sql(model, field, include_default)
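        # Geometry columns additionally need a row in USER_SDO_GEOM_METADATA
        # (extent, tolerance, SRID) and, if requested, a spatial index; the
        # statements are queued here and executed once the table/column exists.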
if isinstance(field, GeometryField):
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_geometry_metadata % {
'table': self.geo_quote_name(db_table),
'column': self.geo_quote_name(field.column),
'dim0': field._extent[0],
'dim1': field._extent[1],
'dim2': field._extent[2],
'dim3': field._extent[3],
'tolerance': field._tolerance,
'srid': field.srid,
}
)
if field.spatial_index:
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
'table': self.quote_name(db_table),
'column': self.quote_name(field.column),
}
)
return column_sql
def create_model(self, model):
super().create_model(model)
self.run_geometry_sql()
def delete_model(self, model):
super().delete_model(model)
self.execute(self.sql_clear_geometry_table_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
})
def add_field(self, model, field):
super().add_field(model, field)
self.run_geometry_sql()
def remove_field(self, model, field):
if isinstance(field, GeometryField):
self.execute(self.sql_clear_geometry_field_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
'column': self.geo_quote_name(field.column),
})
if field.spatial_index:
self.execute(self.sql_drop_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
})
super().remove_field(model, field)
def run_geometry_sql(self):
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
def _create_spatial_index_name(self, model, field):
# Oracle doesn't allow object names > 30 characters. Use this scheme
# instead of self._create_index_name() for backwards compatibility.
return truncate_name('%s_%s_id' % (strip_quotes(model._meta.db_table), field.column), 30)
| apache-2.0 | -4,419,039,309,304,658,000 | 40.659574 | 110 | 0.561032 | false |
iafan/zing | pootle/apps/pootle_comment/forms.py | 2 | 2237 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from datetime import datetime
from django import forms
from django.contrib.auth import get_user_model
from django.utils.functional import cached_property
from django_comments.forms import CommentForm as DjCommentForm
from .delegate import comment_should_not_be_saved
from .exceptions import CommentNotSaved
from .signals import comment_was_saved
User = get_user_model()
class CommentForm(DjCommentForm):
user = forms.ModelChoiceField(queryset=User.objects.all())
def __init__(self, target_object, data=None, *args, **kwargs):
if data:
data["object_pk"] = str(target_object.pk)
data["content_type"] = str(target_object._meta)
if data.get("user"):
data["user"] = str(data["user"].pk)
super(CommentForm, self).__init__(
target_object, data, *args, **kwargs)
if data and data.get("user"):
self.fields["name"].required = False
self.fields["email"].required = False
@cached_property
def comment(self):
return self.get_comment_object()
def clean(self):
super(CommentForm, self).clean()
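        # Ask any registered handler whether this comment should be rejected;
        # if one objects, abort the save with the reason it returned.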
should_not_save = comment_should_not_be_saved.get(
self.target_object.__class__,
instance=self.target_object,
comment=self.comment)
if should_not_save:
raise CommentNotSaved(dict(comment=should_not_save))
def save(self):
comment = self.comment
comment.user = self.cleaned_data["user"]
comment.submit_date = datetime.now()
comment.save()
comment_was_saved.send(
sender=comment.__class__,
comment=comment)
class UnsecuredCommentForm(CommentForm):
def __init__(self, target_object, data=None, *args, **kwargs):
super(UnsecuredCommentForm, self).__init__(
target_object, data, *args, **kwargs)
if data:
data.update(self.generate_security_data())
| gpl-3.0 | -6,611,040,397,666,857,000 | 30.507042 | 77 | 0.641037 | false |
jonathanslenders/python-prompt-toolkit | tests/test_inputstream.py | 1 | 4032 | import pytest
from prompt_toolkit.input.vt100_parser import Vt100Parser
from prompt_toolkit.keys import Keys
class _ProcessorMock(object):
def __init__(self):
self.keys = []
def feed_key(self, key_press):
self.keys.append(key_press)
@pytest.fixture
def processor():
return _ProcessorMock()
@pytest.fixture
def stream(processor):
return Vt100Parser(processor.feed_key)
def test_control_keys(processor, stream):
stream.feed("\x01\x02\x10")
assert len(processor.keys) == 3
assert processor.keys[0].key == Keys.ControlA
assert processor.keys[1].key == Keys.ControlB
assert processor.keys[2].key == Keys.ControlP
assert processor.keys[0].data == "\x01"
assert processor.keys[1].data == "\x02"
assert processor.keys[2].data == "\x10"
def test_arrows(processor, stream):
stream.feed("\x1b[A\x1b[B\x1b[C\x1b[D")
assert len(processor.keys) == 4
assert processor.keys[0].key == Keys.Up
assert processor.keys[1].key == Keys.Down
assert processor.keys[2].key == Keys.Right
assert processor.keys[3].key == Keys.Left
assert processor.keys[0].data == "\x1b[A"
assert processor.keys[1].data == "\x1b[B"
assert processor.keys[2].data == "\x1b[C"
assert processor.keys[3].data == "\x1b[D"
def test_escape(processor, stream):
stream.feed("\x1bhello")
assert len(processor.keys) == 1 + len("hello")
assert processor.keys[0].key == Keys.Escape
assert processor.keys[1].key == "h"
assert processor.keys[0].data == "\x1b"
assert processor.keys[1].data == "h"
def test_special_double_keys(processor, stream):
stream.feed("\x1b[1;3D") # Should both send escape and left.
assert len(processor.keys) == 2
assert processor.keys[0].key == Keys.Escape
assert processor.keys[1].key == Keys.Left
assert processor.keys[0].data == "\x1b[1;3D"
assert processor.keys[1].data == ""
def test_flush_1(processor, stream):
# Send left key in two parts without flush.
stream.feed("\x1b")
stream.feed("[D")
assert len(processor.keys) == 1
assert processor.keys[0].key == Keys.Left
assert processor.keys[0].data == "\x1b[D"
def test_flush_2(processor, stream):
# Send left key with a 'Flush' in between.
# The flush should make sure that we process everything before as-is,
    # which makes the first part just an escape character instead.
stream.feed("\x1b")
stream.flush()
stream.feed("[D")
assert len(processor.keys) == 3
assert processor.keys[0].key == Keys.Escape
assert processor.keys[1].key == "["
assert processor.keys[2].key == "D"
assert processor.keys[0].data == "\x1b"
assert processor.keys[1].data == "["
assert processor.keys[2].data == "D"
def test_meta_arrows(processor, stream):
stream.feed("\x1b\x1b[D")
assert len(processor.keys) == 2
assert processor.keys[0].key == Keys.Escape
assert processor.keys[1].key == Keys.Left
def test_control_square_close(processor, stream):
stream.feed("\x1dC")
assert len(processor.keys) == 2
assert processor.keys[0].key == Keys.ControlSquareClose
assert processor.keys[1].key == "C"
def test_invalid(processor, stream):
    # Invalid sequence that has two characters in common with other
# sequences.
stream.feed("\x1b[*")
assert len(processor.keys) == 3
assert processor.keys[0].key == Keys.Escape
assert processor.keys[1].key == "["
assert processor.keys[2].key == "*"
def test_cpr_response(processor, stream):
stream.feed("a\x1b[40;10Rb")
assert len(processor.keys) == 3
assert processor.keys[0].key == "a"
assert processor.keys[1].key == Keys.CPRResponse
assert processor.keys[2].key == "b"
def test_cpr_response_2(processor, stream):
# Make sure that the newline is not included in the CPR response.
stream.feed("\x1b[40;1R\n")
assert len(processor.keys) == 2
assert processor.keys[0].key == Keys.CPRResponse
assert processor.keys[1].key == Keys.ControlJ
| bsd-3-clause | 7,922,073,704,762,516,000 | 28.007194 | 73 | 0.660466 | false |
camptocamp/c2c-rd-addons | account_financial_report_chricar/report/general_ledger_landscape.py | 5 | 18269 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 CamptoCamp
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time
from openerp.report import report_sxw
import rml_parse
from openerp.tools.translate import _
class general_ledger_landscape(rml_parse.rml_parse):
_name = 'report.account.general.ledger.cumulative.landscape'
def set_context(self, objects, data, ids, report_type = None):
self.get_context_date_period(data['form'])
new_ids = []
if (data['model'] == 'account.account'):
new_ids = ids
else:
new_ids = data['form']['account_list'][0][2]
objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
super(general_ledger_landscape, self).set_context(objects, data, new_ids, report_type)
def __init__(self, cr, uid, name, context):
super(general_ledger_landscape, self).__init__(cr, uid, name, context=context)
self.query = "" # SQL query to get account moves for given date or period
self.min_date = "" # Min date of the given date or period
self.ctx = {} # Context for given date or period
self.ctxfy = {} # Context from the date start or first period of the fiscal year
self.child_ids = ""
self.tot_currency = 0.0
self.balance_accounts = {}
self.localcontext.update( {
'time': time,
'lines': self.lines,
'sum_debit_account': self._sum_debit_account,
'sum_credit_account': self._sum_credit_account,
'sum_balance_account': self._sum_balance_account,
'get_children_accounts': self.get_children_accounts,
'sum_currency_amount_account': self._sum_currency_amount_account,
'get_fiscalyear':self.get_fiscalyear,
'get_periods':self.get_periods,
})
self.context = context
def get_fiscalyear(self, form):
res=[]
if form.has_key('fiscalyear'):
fisc_id = form['fiscalyear']
if not (fisc_id):
return ''
self.cr.execute("SELECT name FROM account_fiscalyear WHERE id = %s" , (int(fisc_id),))
res=self.cr.fetchone()
return res and res[0] or ''
def get_periods(self, form):
result=''
if form.has_key('periods') and form['periods'][0][2]:
period_ids = ",".join([str(x) for x in form['periods'][0][2] if x])
self.cr.execute("SELECT name FROM account_period WHERE id in (%s)" % (period_ids))
res = self.cr.fetchall()
len_res = len(res)
for r in res:
if (r == res[len_res-1]):
result+=r[0]+". "
else:
result+=r[0]+", "
elif form.has_key('date_from') and form.has_key('date_to'):
result = self.formatLang(form['date_from'], date=True) + ' - ' + self.formatLang(form['date_to'], date=True) + ' '
else:
fy_obj = self.pool.get('account.fiscalyear').browse(self.cr,self.uid,form['fiscalyear'])
res = fy_obj.period_ids
len_res = len(res)
for r in res:
if r == res[len_res-1]:
result+=r.name+". "
else:
result+=r.name+", "
return str(result and result[:-1]) or ''
def _calc_contrepartie(self, cr, uid, ids, context={}):
result = {}
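        # For each move line, build a short string listing the codes of the
        # other accounts used in the same move (its counterparts).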
#for id in ids:
# result.setdefault(id, False)
for account_line in self.pool.get('account.move.line').browse(cr, uid, ids, context):
            # To avoid long text in the field we limit it to 5 lines
#
result[account_line.id] = ' '
num_id_move = str(account_line.move_id.id)
num_id_line = str(account_line.id)
account_id = str(account_line.account_id.id)
            # search the counterpart accounts:
            # we have the account ID; look at all the other lines of the same
            # move (the current line is excluded) and collect their account codes.
cr.execute("SELECT distinct(ac.code) as code_rest,ac.name as name_rest "\
"FROM account_account AS ac, account_move_line mv "\
"WHERE ac.id = mv.account_id and mv.move_id = " + num_id_move + " and mv.account_id != " + account_id )
res_mv = cr.dictfetchall()
            # a move normally has at least 2 lines; since the current line is excluded from the query, testing on 1 is enough
if (len(res_mv) >=1):
concat = ''
rup_id = 0
for move_rest in res_mv:
concat = concat + move_rest['code_rest'] + '|'
result[account_line.id] = concat
if rup_id >5:
                        # stop computing here, but append "..." first to show the list was truncated
result[account_line.id] = concat + '...'
break
rup_id+=1
return result
def get_context_date_period(self, form):
date_min = period_min = False
# ctx: Context for the given date or period
ctx = self.context.copy()
ctx['state'] = form['context'].get('state','all')
if 'fiscalyear' in form and form['fiscalyear']:
ctx['fiscalyear'] = form['fiscalyear']
if form['state'] in ['byperiod', 'all']:
ctx['periods'] = form['periods'][0][2]
if form['state'] in ['bydate', 'all']:
ctx['date_from'] = form['date_from']
ctx['date_to'] = form['date_to']
if 'periods' not in ctx:
ctx['periods'] = []
self.ctx = ctx
self.query = self.pool.get('account.move.line')._query_get(self.cr, self.uid, context=ctx)
# ctxfy: Context from the date start / first period of the fiscal year
ctxfy = ctx.copy()
ctxfy['periods'] = ctx['periods'][:]
if form['state'] in ['byperiod', 'all'] and len(ctx['periods']):
self.cr.execute("""SELECT id, date_start, fiscalyear_id
FROM account_period
WHERE date_start = (SELECT min(date_start) FROM account_period WHERE id in (%s))"""
% (','.join([str(x) for x in ctx['periods']])))
res = self.cr.dictfetchone()
period_min = res['date_start']
self.cr.execute("""SELECT id
FROM account_period
WHERE fiscalyear_id in (%s) AND date_start < '%s'"""
% (res['fiscalyear_id'], res['date_start']))
ids = filter(None, map(lambda x:x[0], self.cr.fetchall()))
ctxfy['periods'].extend(ids)
if form['state'] in ['bydate', 'all']:
self.cr.execute("""SELECT date_start
FROM account_fiscalyear
WHERE '%s' BETWEEN date_start AND date_stop""" % (ctx['date_from']))
res = self.cr.dictfetchone()
ctxfy['date_from'] = res['date_start']
date_min = form['date_from']
if form['state'] == 'none' or (form['state'] == 'byperiod' and not len(ctx['periods'])):
if 'fiscalyear' in form and form['fiscalyear']:
sql = """SELECT id, date_start
FROM account_period
WHERE fiscalyear_id in (%s)
ORDER BY date_start""" % (ctx['fiscalyear'])
else:
sql = """SELECT id, date_start
FROM account_period
WHERE fiscalyear_id in (SELECT id FROM account_fiscalyear WHERE state='draft')
ORDER BY date_start"""
self.cr.execute(sql)
res = self.cr.dictfetchall()
period_min = res[0]['date_start']
ids = filter(None, map(lambda x:x['id'], res))
ctxfy['periods'] = ids
self.ctxfy = ctxfy
if not period_min:
self.min_date = date_min
elif not date_min:
self.min_date = period_min
else:
            # If both period and date are given, the maximum of the min dates is chosen
if period_min < date_min:
self.min_date = date_min
else:
self.min_date = period_min
def get_children_accounts(self, account, form):
move_line_obj = self.pool.get('account.move.line')
account_obj = self.pool.get('account.account')
invoice_obj = self.pool.get('account.invoice')
self.child_ids = account_obj.search(self.cr, self.uid, [('parent_id', 'child_of', self.ids)])
res = []
ctx = self.ctx.copy()
        if account and account.child_consol_ids: # also add ids of consolidated children of the selected account
ctx['consolidate_childs'] = True
ctx['account_id'] = account.id
ids_acc = account_obj.search(self.cr, self.uid,[('parent_id', 'child_of', [account.id])], context=ctx)
for child_id in ids_acc:
child_account = account_obj.browse(self.cr, self.uid, child_id)
balance_account = self._sum_balance_account(child_account,form)
self.balance_accounts[child_account.id] = balance_account
if form['display_account'] == 'bal_mouvement':
if child_account.type != 'view' \
and len(move_line_obj.search(self.cr, self.uid,
[('account_id','=',child_account.id)],
context=ctx)) != 0 :
res.append(child_account)
elif form['display_account'] == 'bal_solde':
if child_account.type != 'view' \
and len(move_line_obj.search(self.cr, self.uid,
[('account_id','=',child_account.id)],
context=ctx)) != 0 :
if balance_account != 0.0:
res.append(child_account)
else:
if child_account.type != 'view' \
and len(move_line_obj.search(self.cr, self.uid,
[('account_id','>=',child_account.id)],
context=ctx)) != 0 :
res.append(child_account)
##
if not len(res):
return [account]
else:
## We will now compute initial balance
for move in res:
sql_balance_init = "SELECT sum(l.debit) AS sum_debit, sum(l.credit) AS sum_credit "\
"FROM account_move_line l "\
"WHERE l.account_id = " + str(move.id) + " AND %s" % (self.query)
self.cr.execute(sql_balance_init)
resultat = self.cr.dictfetchall()
if resultat[0] :
if resultat[0]['sum_debit'] == None:
sum_debit = 0
else:
sum_debit = resultat[0]['sum_debit']
if resultat[0]['sum_credit'] == None:
sum_credit = 0
else:
sum_credit = resultat[0]['sum_credit']
move.init_credit = sum_credit
move.init_debit = sum_debit
else:
move.init_credit = 0
move.init_debit = 0
return res
def lines(self, account, form):
inv_types = {
'out_invoice': _('CI: '),
'in_invoice': _('SI: '),
'out_refund': _('OR: '),
'in_refund': _('SR: '),
}
if form['sortbydate'] == 'sort_date':
sorttag = 'l.date'
else:
sorttag = 'j.code'
sql = """
SELECT l.id, l.date, j.code, c.symbol AS currency_code, l.amount_currency, l.ref, l.name , l.debit, l.credit, l.period_id
FROM account_move_line as l
LEFT JOIN res_currency c on (l.currency_id=c.id)
JOIN account_journal j on (l.journal_id=j.id)
AND account_id = %%s
AND %s
ORDER by %s""" % (self.query, sorttag)
self.cr.execute(sql % account.id)
res = self.cr.dictfetchall()
move_line_obj = self.pool.get('account.move.line')
account_obj = self.pool.get('account.account')
invoice_obj = self.pool.get('account.invoice')
# Balance from init fiscal year to last date given by the user
accounts = account_obj.read(self.cr, self.uid, [account.id], ['balance'], self.ctxfy)
sum = accounts[0]['balance']
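        # Walk the move lines from newest to oldest: each line is given the
        # cumulative balance up to and including itself, then its own
        # debit/credit is removed from the running total.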
for l in reversed(res):
line = move_line_obj.browse(self.cr, self.uid, l['id'])
l['move'] = line.move_id.name_split
self.cr.execute('Select id from account_invoice where move_id =%s'%(line.move_id.id))
tmpres = self.cr.dictfetchall()
if len(tmpres) > 0 :
inv = invoice_obj.browse(self.cr, self.uid, tmpres[0]['id'])
l['ref'] = inv_types[inv.type] + ': '+str(inv.number)
if line.partner_id :
l['partner'] = line.partner_id.name
else :
l['partner'] = ''
l['line_corresp'] = self._calc_contrepartie(self.cr,self.uid,[l['id']])[l['id']]
# Cumulative balance update
l['progress'] = sum
sum = sum - (l['debit'] or 0) + (l['credit'] or 0)
# Modification of currency amount
if (l['credit'] > 0):
if l['amount_currency'] != None:
l['amount_currency'] = abs(l['amount_currency']) * -1
if l['amount_currency'] != None:
self.tot_currency = self.tot_currency + l['amount_currency']
decimal_precision_obj = self.pool.get('decimal.precision')
ids = decimal_precision_obj.search(self.cr, self.uid, [('name', '=', 'Account')])
digits = decimal_precision_obj.browse(self.cr, self.uid, ids)[0].digits
#if abs(sum) > 10**-int(config['price_accuracy']) and form['initial_balance']:
if round(sum,digits) != 0.0 and form['initial_balance']:
res.insert(0, {
'date': self.min_date,
'name': _('Initial balance'),
'progress': sum,
'partner': '',
'move': '',
'ref': '',
'debit': '',
'credit': '',
'amount_currency': '',
'currency_code': '',
'code': '',
'line_corresp': '',
})
return res
def _sum_debit_account(self, account, form):
self.cr.execute("SELECT sum(debit) "\
"FROM account_move_line l "\
"WHERE l.account_id = %s AND %s " % (account.id, self.query))
sum_debit = self.cr.fetchone()[0] or 0.0
return sum_debit
def _sum_credit_account(self, account, form):
self.cr.execute("SELECT sum(credit) "\
"FROM account_move_line l "\
"WHERE l.account_id = %s AND %s " % (account.id, self.query))
sum_credit = self.cr.fetchone()[0] or 0.0
return sum_credit
def _sum_balance_account(self, account, form):
# Balance from init fiscal year to last date given by the user
accounts = self.pool.get('account.account').read(self.cr, self.uid, [account.id], ['balance'], self.ctxfy)
sum_balance = accounts[0]['balance']
return sum_balance
def _set_get_account_currency_code(self, account_id):
self.cr.execute("SELECT c.symbol as code "\
"FROM res_currency c, account_account as ac "\
"WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
result = self.cr.fetchone()
if result:
self.account_currency = result[0]
else:
self.account_currency = False
def _sum_currency_amount_account(self, account, form):
self._set_get_account_currency_code(account.id)
self.cr.execute("SELECT sum(l.amount_currency) "\
"FROM account_move_line as l, res_currency as rc "\
"WHERE l.currency_id = rc.id AND l.account_id= %s AND %s" % (account.id, self.query))
total = self.cr.fetchone()
if self.account_currency:
return_field = str(total[0]) + self.account_currency
return return_field
else:
currency_total = self.tot_currency = 0.0
return currency_total
report_sxw.report_sxw('report.account.general.ledger.cumulative.landscape', 'account.account', 'addons/account_financial_report_chricar/report/general_ledger_landscape.rml', parser=general_ledger_landscape, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,291,520,179,597,972,000 | 43.128019 | 220 | 0.53183 | false |
dcoles/ivle | ivle/webapp/security/__init__.py | 1 | 1754 | # IVLE - Informatics Virtual Learning Environment
# Copyright (C) 2007-2009 The University of Melbourne
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Author: William Grant
import ivle.database
from ivle.webapp.base.plugins import ViewPlugin
from ivle.webapp.security.views import LoginView, LogoutView
from ivle.webapp import ApplicationRoot
def get_user_details(req):
"""Gets the name of the logged in user, without presenting a login box
or attempting to authenticate.
Returns None if there is no user logged in.
"""
session = req.get_session()
# Check the session to see if someone is logged in. If so, go with it.
try:
login = session['login']
except KeyError:
return None
session.unlock()
# Get the full User object from the db associated with this login
return ivle.database.User.get_by_login(req.store, login)
class Plugin(ViewPlugin):
"""
The Plugin class for the security plugin.
"""
views = [(ApplicationRoot, '+login', LoginView),
(ApplicationRoot, '+logout', LogoutView),
]
| gpl-2.0 | 7,306,604,468,488,788,000 | 34.795918 | 76 | 0.721209 | false |
Twangist/log_calls | tests/test_record_history_log_methods.py | 1 | 8533 | __author__ = 'brianoneill'
from log_calls import record_history
#-----------------------------------------------------
# record_history.print, record_history.print_exprs
# Test in methods, in functions
#-----------------------------------------------------
def test_rh_log_message__output_expected():
"""
------------------------------------------------
log_message
------------------------------------------------
>>> @record_history(omit='not_decorated')
... class B():
... def __init__(self):
... record_history.print('Hi')
... # Test that the old version still works! It shares code.
... wrapper = self.get_own_record_history_wrapper()
... wrapper.log_message("Hi from original log_message")
...
... def method(self):
... record_history.print('Hi')
... def not_decorated(self):
... record_history.print('Hi')
... @classmethod
... def clsmethod(cls):
... record_history.print('Hi')
... @staticmethod
... def statmethod():
... record_history.print('Hi')
...
... @property
... def prop(self):
... record_history.print('Hi')
... @prop.setter
... @record_history(name='B.%s.setter')
... def prop(self, val):
... record_history.print('Hi')
...
... def setx(self, val):
... record_history.print('Hi from setx alias x.setter')
... def delx(self):
... record_history.print('Hi from delx alias x.deleter')
... x = property(None, setx, delx)
>>> b = B()
B.__init__ [1]: Hi
B.__init__ [1]: Hi from original log_message
>>> b.method()
B.method [1]: Hi
>>> # NO OUTPUT from this, nor an exception,
>>> # because by default
>>> record_history.print_methods_raise_if_no_deco
False
>>> b.not_decorated()
>>> b.statmethod()
B.statmethod [1]: Hi
>>> b.clsmethod()
B.clsmethod [1]: Hi
>>> b.prop
B.prop [1]: Hi
>>> b.prop = 17
B.prop.setter [1]: Hi
>>> b.x = 13
B.setx [1]: Hi from setx alias x.setter
>>> del b.x
B.delx [1]: Hi from delx alias x.deleter
------------------------------------------------
log_exprs
------------------------------------------------
>>> @record_history(omit='not_decorated')
... class D():
... def __init__(self):
... x = 2
... y = 3
... # Original first:
... wrapper = self.get_own_record_history_wrapper()
... wrapper.log_exprs('x', 'y', 'x+y')
...
... record_history.print_exprs('x', 'y', 'x+y')
...
... def method(self):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
...
... def not_decorated(self):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
...
... @classmethod
... def clsmethod(cls):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'cls.__name__')
...
... @staticmethod
... def statmethod():
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
...
... @property
... def prop(self):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
...
... @prop.setter
... @record_history(name='D.%s.setter')
... def prop(self, val):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
...
... def setx(self, val):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
...
... def delx(self):
... x = 2; y = 3
... record_history.print_exprs('x', 'y', 'x+y')
... x = property(None, setx, delx)
>>> d = D()
D.__init__ [1]: x = 2, y = 3, x+y = 5
D.__init__ [1]: x = 2, y = 3, x+y = 5
>>> d.method()
D.method [1]: x = 2, y = 3, x+y = 5
# NO OUTPUT from this, NOR AN EXCEPTION,
# because by default
    # record_history.print_methods_raise_if_no_deco == False
>>> d.not_decorated()
>>> d.statmethod()
D.statmethod [1]: x = 2, y = 3, x+y = 5
>>> d.clsmethod()
D.clsmethod [1]: x = 2, y = 3, cls.__name__ = 'D'
>>> d.prop
D.prop [1]: x = 2, y = 3, x+y = 5
>>> d.prop = 17
D.prop.setter [1]: x = 2, y = 3, x+y = 5
>>> d.x = 13
D.setx [1]: x = 2, y = 3, x+y = 5
>>> del d.x
D.delx [1]: x = 2, y = 3, x+y = 5
------------------------------------------------
functions
------------------------------------------------
>>> @record_history()
... def bar(x, y, z):
... record_history.print("Hi", "there")
... pass
>>> bar(1, 2, 3)
bar [1]: Hi there
"""
pass
#-----------------------------------------------------
# Test record_history.print_methods_raise_if_no_deco (bool)
# On undecorated functions/methods,
# and deco'd but with NO_DECO=True parameter
#-----------------------------------------------------
def test_rh_log_message__no_output_no_exceptions_expected():
"""
>>> record_history.print_methods_raise_if_no_deco = False # the default
>>> def nodeco(x, y, z):
... record_history.print("Hi", "from", "function nodeco")
... pass
>>> nodeco(11, 12, 13) # no output, NO EXCEPTION
>>> @record_history(omit='not_decorated')
... class A():
... def __init__(self):
... record_history.print('Hi')
... def not_decorated(self):
... record_history.print('Hi')
>>> a = A()
A.__init__ [1]: Hi
>>> a.not_decorated() # no output, NO EXCEPTION
>>> @record_history(NO_DECO=True)
... class C():
... def __init__(self):
... record_history.print('Hi')
... def cmethod(self, x):
... record_history.print('Hi')
... record_history.print_exprs('x + 10')
>>> c = C() # no output, no exception
>>> c.cmethod(5) # no output, no exception
>>> def schmoe(x):
... record_history.print("Yo, schmoe")
... pass
>>> schmoe(170) # no output, no exception
"""
pass
def test_rh_log_message__exceptions_expected():
"""
>>> record_history.print_methods_raise_if_no_deco = True # not the default
>>> @record_history(omit='not_decorated')
... class A():
... def __init__(self):
... record_history.print('Hi')
... def not_decorated(self):
... record_history.print('Hi')
>>> a = A()
A.__init__ [1]: Hi
>>> a.not_decorated() # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: ... is not decorated...
>>> @record_history(NO_DECO=True)
... class B():
... def __init__(self):
... # Comment out so we can create a B object!
... # record_history.print('Hi')
... pass
... def bmethod1(self):
... record_history.print('Hi')
... def bmethod2(self, z):
... record_history.print_exprs('z * 3')
>>> b = B() # no harm, noop
>>> b.bmethod1() # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: ... is not decorated...
>>> b.bmethod2(1) # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: ... is not decorated...
>>> @record_history(NO_DECO=True)
... def foo(x, y, z):
... record_history.print("Hi", "from", "function foo")
... pass
>>> foo(1, 2, 3) # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: ... is not decorated...
Undecorated, ever
>>> def schmoe(x):
... record_history.print("Yo, schmoe")
... pass
>>> schmoe(100) # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: ... is not decorated...
"""
pass
###############################################################
import doctest
# For unittest integration
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite())
return tests
if __name__ == '__main__':
doctest.testmod()
| mit | -5,525,261,937,872,755,000 | 27.538462 | 80 | 0.452244 | false |
shahar-stratoscale/nova | nova/image/s3.py | 16 | 17627 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import base64
import binascii
import os
import shutil
import tarfile
import tempfile
import boto.s3.connection
import eventlet
from lxml import etree
from oslo.config import cfg
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
LOG = logging.getLogger(__name__)
s3_opts = [
cfg.StrOpt('image_decryption_dir',
default='/tmp',
help='Parent directory for tempdir used for image decryption'),
cfg.StrOpt('s3_host',
default='$my_ip',
help='Hostname or IP for OpenStack to use when accessing '
'the S3 api'),
cfg.IntOpt('s3_port',
default=3333,
help='Port used when accessing the S3 api'),
cfg.StrOpt('s3_access_key',
default='notchecked',
help='Access key to use for S3 server for images'),
cfg.StrOpt('s3_secret_key',
default='notchecked',
help='Secret key to use for S3 server for images'),
cfg.BoolOpt('s3_use_ssl',
default=False,
help='Whether to use SSL when talking to S3'),
cfg.BoolOpt('s3_affix_tenant',
default=False,
help='Whether to affix the tenant id to the access key '
'when downloading from S3'),
]
CONF = cfg.CONF
CONF.register_opts(s3_opts)
CONF.import_opt('my_ip', 'nova.netconf')
class S3ImageService(object):
"""Wraps an existing image service to support s3 based register."""
    # translate our internal state to the states defined by the EC2 API documentation
image_state_map = {'downloading': 'pending',
'failed_download': 'failed',
'decrypting': 'pending',
'failed_decrypt': 'failed',
'untarring': 'pending',
'failed_untar': 'failed',
'uploading': 'pending',
'failed_upload': 'failed',
'available': 'available'}
def __init__(self, service=None, *args, **kwargs):
self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
self.service = service or glance.get_default_image_service()
self.service.__init__(*args, **kwargs)
def _translate_uuids_to_ids(self, context, images):
return [self._translate_uuid_to_id(context, img) for img in images]
def _translate_uuid_to_id(self, context, image):
image_copy = image.copy()
try:
image_uuid = image_copy['id']
except KeyError:
pass
else:
image_copy['id'] = ec2utils.glance_id_to_id(context, image_uuid)
for prop in ['kernel_id', 'ramdisk_id']:
try:
image_uuid = image_copy['properties'][prop]
except (KeyError, ValueError):
pass
else:
image_id = ec2utils.glance_id_to_id(context, image_uuid)
image_copy['properties'][prop] = image_id
try:
image_copy['properties']['image_state'] = self.image_state_map[
image['properties']['image_state']]
except (KeyError, ValueError):
pass
return image_copy
def _translate_id_to_uuid(self, context, image):
image_copy = image.copy()
try:
image_id = image_copy['id']
except KeyError:
pass
else:
image_copy['id'] = ec2utils.id_to_glance_id(context, image_id)
for prop in ['kernel_id', 'ramdisk_id']:
try:
image_id = image_copy['properties'][prop]
except (KeyError, ValueError):
pass
else:
image_uuid = ec2utils.id_to_glance_id(context, image_id)
image_copy['properties'][prop] = image_uuid
return image_copy
def create(self, context, metadata, data=None):
"""Create an image.
metadata['properties'] should contain image_location.
"""
image = self._s3_create(context, metadata)
return image
def delete(self, context, image_id):
image_uuid = ec2utils.id_to_glance_id(context, image_id)
self.service.delete(context, image_uuid)
def update(self, context, image_id, metadata, data=None):
image_uuid = ec2utils.id_to_glance_id(context, image_id)
metadata = self._translate_id_to_uuid(context, metadata)
image = self.service.update(context, image_uuid, metadata, data)
return self._translate_uuid_to_id(context, image)
def detail(self, context, **kwargs):
#NOTE(bcwaldon): sort asc to make sure we assign lower ids
# to older images
kwargs.setdefault('sort_dir', 'asc')
images = self.service.detail(context, **kwargs)
return self._translate_uuids_to_ids(context, images)
def show(self, context, image_id):
image_uuid = ec2utils.id_to_glance_id(context, image_id)
image = self.service.show(context, image_uuid)
return self._translate_uuid_to_id(context, image)
@staticmethod
def _conn(context):
# NOTE(vish): access and secret keys for s3 server are not
# checked in nova-objectstore
access = CONF.s3_access_key
if CONF.s3_affix_tenant:
access = '%s:%s' % (access, context.project_id)
secret = CONF.s3_secret_key
calling = boto.s3.connection.OrdinaryCallingFormat()
return boto.s3.connection.S3Connection(aws_access_key_id=access,
aws_secret_access_key=secret,
is_secure=CONF.s3_use_ssl,
calling_format=calling,
port=CONF.s3_port,
host=CONF.s3_host)
@staticmethod
def _download_file(bucket, filename, local_dir):
key = bucket.get_key(filename)
local_filename = os.path.join(local_dir, os.path.basename(filename))
key.get_contents_to_filename(local_filename)
return local_filename
def _s3_parse_manifest(self, context, metadata, manifest):
manifest = etree.fromstring(manifest)
image_format = 'ami'
try:
kernel_id = manifest.find('machine_configuration/kernel_id').text
if kernel_id == 'true':
image_format = 'aki'
kernel_id = None
except Exception:
kernel_id = None
try:
ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
if ramdisk_id == 'true':
image_format = 'ari'
ramdisk_id = None
except Exception:
ramdisk_id = None
try:
arch = manifest.find('machine_configuration/architecture').text
except Exception:
arch = 'x86_64'
# NOTE(yamahata):
        # EC2 ec2-bundle-image --block-device-mapping accepts
# <virtual name>=<device name> where
# virtual name = {ami, root, swap, ephemeral<N>}
        #   where N is a non-negative integer
# device name = the device name seen by guest kernel.
# They are converted into
# block_device_mapping/mapping/{virtual, device}
#
# Do NOT confuse this with ec2-register's block device mapping
# argument.
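        # Illustrative sketch (hypothetical manifest values): a
        #   <block_device_mapping>
        #     <mapping><virtual>ami</virtual><device>sda1</device></mapping>
        #     <mapping><virtual>ephemeral0</virtual><device>sdb</device></mapping>
        #   </block_device_mapping>
        # element under <machine_configuration> is parsed below into
        #   mappings = [{'virtual': 'ami', 'device': 'sda1'},
        #               {'virtual': 'ephemeral0', 'device': 'sdb'}]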
mappings = []
try:
block_device_mapping = manifest.findall('machine_configuration/'
'block_device_mapping/'
'mapping')
for bdm in block_device_mapping:
mappings.append({'virtual': bdm.find('virtual').text,
'device': bdm.find('device').text})
except Exception:
mappings = []
properties = metadata['properties']
properties['architecture'] = arch
def _translate_dependent_image_id(image_key, image_id):
image_uuid = ec2utils.ec2_id_to_glance_id(context, image_id)
properties[image_key] = image_uuid
if kernel_id:
_translate_dependent_image_id('kernel_id', kernel_id)
if ramdisk_id:
_translate_dependent_image_id('ramdisk_id', ramdisk_id)
if mappings:
properties['mappings'] = mappings
metadata.update({'disk_format': image_format,
'container_format': image_format,
'status': 'queued',
'is_public': False,
'properties': properties})
metadata['properties']['image_state'] = 'pending'
#TODO(bcwaldon): right now, this removes user-defined ids.
# We need to re-enable this.
metadata.pop('id', None)
image = self.service.create(context, metadata)
# extract the new uuid and generate an int id to present back to user
image_uuid = image['id']
image['id'] = ec2utils.glance_id_to_id(context, image_uuid)
# return image_uuid so the caller can still make use of image_service
return manifest, image, image_uuid
def _s3_create(self, context, metadata):
"""Gets a manifest from s3 and makes an image."""
image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
image_location = metadata['properties']['image_location'].lstrip('/')
bucket_name = image_location.split('/')[0]
manifest_path = image_location[len(bucket_name) + 1:]
bucket = self._conn(context).get_bucket(bucket_name)
key = bucket.get_key(manifest_path)
manifest = key.get_contents_as_string()
manifest, image, image_uuid = self._s3_parse_manifest(context,
metadata,
manifest)
def delayed_create():
"""This handles the fetching and decrypting of the part files."""
context.update_store()
log_vars = {'image_location': image_location,
'image_path': image_path}
def _update_image_state(context, image_uuid, image_state):
metadata = {'properties': {'image_state': image_state}}
self.service.update(context, image_uuid, metadata,
purge_props=False)
def _update_image_data(context, image_uuid, image_data):
metadata = {}
self.service.update(context, image_uuid, metadata, image_data,
purge_props=False)
try:
_update_image_state(context, image_uuid, 'downloading')
try:
parts = []
elements = manifest.find('image').getiterator('filename')
for fn_element in elements:
part = self._download_file(bucket,
fn_element.text,
image_path)
parts.append(part)
# NOTE(vish): this may be suboptimal, should we use cat?
enc_filename = os.path.join(image_path, 'image.encrypted')
with open(enc_filename, 'w') as combined:
for filename in parts:
with open(filename) as part:
shutil.copyfileobj(part, combined)
except Exception:
LOG.exception(_("Failed to download %(image_location)s "
"to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_download')
return
_update_image_state(context, image_uuid, 'decrypting')
try:
hex_key = manifest.find('image/ec2_encrypted_key').text
encrypted_key = binascii.a2b_hex(hex_key)
hex_iv = manifest.find('image/ec2_encrypted_iv').text
encrypted_iv = binascii.a2b_hex(hex_iv)
dec_filename = os.path.join(image_path, 'image.tar.gz')
self._decrypt_image(context, enc_filename, encrypted_key,
encrypted_iv, dec_filename)
except Exception:
LOG.exception(_("Failed to decrypt %(image_location)s "
"to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_decrypt')
return
_update_image_state(context, image_uuid, 'untarring')
try:
unz_filename = self._untarzip_image(image_path,
dec_filename)
except Exception:
LOG.exception(_("Failed to untar %(image_location)s "
"to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_untar')
return
_update_image_state(context, image_uuid, 'uploading')
try:
with open(unz_filename) as image_file:
_update_image_data(context, image_uuid, image_file)
except Exception:
LOG.exception(_("Failed to upload %(image_location)s "
"to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_upload')
return
metadata = {'status': 'active',
'properties': {'image_state': 'available'}}
self.service.update(context, image_uuid, metadata,
purge_props=False)
shutil.rmtree(image_path)
except exception.ImageNotFound:
LOG.info(_("Image %s was deleted underneath us"), image_uuid)
return
eventlet.spawn_n(delayed_create)
return image
def _decrypt_image(self, context, encrypted_filename, encrypted_key,
encrypted_iv, decrypted_filename):
elevated = context.elevated()
try:
key = self.cert_rpcapi.decrypt_text(elevated,
project_id=context.project_id,
text=base64.b64encode(encrypted_key))
except Exception as exc:
msg = _('Failed to decrypt private key: %s') % exc
raise exception.NovaException(msg)
try:
iv = self.cert_rpcapi.decrypt_text(elevated,
project_id=context.project_id,
text=base64.b64encode(encrypted_iv))
except Exception as exc:
raise exception.NovaException(_('Failed to decrypt initialization '
'vector: %s') % exc)
try:
utils.execute('openssl', 'enc',
'-d', '-aes-128-cbc',
'-in', '%s' % (encrypted_filename,),
'-K', '%s' % (key,),
'-iv', '%s' % (iv,),
'-out', '%s' % (decrypted_filename,))
except processutils.ProcessExecutionError as exc:
raise exception.NovaException(_('Failed to decrypt image file '
'%(image_file)s: %(err)s') %
{'image_file': encrypted_filename,
'err': exc.stdout})
@staticmethod
def _test_for_malicious_tarball(path, filename):
"""Raises exception if extracting tarball would escape extract path."""
tar_file = tarfile.open(filename, 'r|gz')
for n in tar_file.getnames():
if not os.path.abspath(os.path.join(path, n)).startswith(path):
tar_file.close()
raise exception.NovaException(_('Unsafe filenames in image'))
tar_file.close()
@staticmethod
def _untarzip_image(path, filename):
S3ImageService._test_for_malicious_tarball(path, filename)
tar_file = tarfile.open(filename, 'r|gz')
tar_file.extractall(path)
image_file = tar_file.getnames()[0]
tar_file.close()
return os.path.join(path, image_file)
| apache-2.0 | -7,729,528,344,108,920,000 | 39.428899 | 79 | 0.539343 | false |
angelapper/odoo | addons/report/models/abstract_report.py | 47 | 2092 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
class AbstractReport(osv.AbstractModel):
"""Model used to embed old style reports"""
_name = 'report.abstract_report'
_template = None
_wrapped_report_class = None
def render_html(self, cr, uid, ids, data=None, context=None):
context = dict(context or {})
# If the key 'landscape' is present in data['form'], passing it into the context
if data and data.get('form', {}).get('landscape'):
context['landscape'] = True
if context and context.get('active_ids'):
# Browse the selected objects via their reference in context
model = context.get('active_model') or context.get('model')
objects_model = self.pool[model]
objects = objects_model.browse(cr, uid, context['active_ids'], context=context)
else:
# If no context is set (for instance, during test execution), build one
model = self.pool['report']._get_report_from_name(cr, uid, self._template).model
objects_model = self.pool[model]
objects = objects_model.browse(cr, uid, ids, context=context)
context['active_model'] = model
context['active_ids'] = ids
# Generate the old style report
wrapped_report = self._wrapped_report_class(cr, uid, '', context=context)
wrapped_report.set_context(objects, data, context['active_ids'])
# Rendering self._template with the wrapped report instance localcontext as
# rendering environment
docargs = dict(wrapped_report.localcontext)
if not docargs.get('lang'):
docargs.pop('lang', False)
docargs['docs'] = docargs.get('objects')
# Used in template translation (see translate_doc method from report model)
docargs['doc_ids'] = context['active_ids']
docargs['doc_model'] = model
return self.pool['report'].render(cr, uid, [], self._template, docargs, context=context)
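# Illustrative usage sketch (model and template names are hypothetical): a
# concrete report embeds a legacy report_sxw parser by inheriting from this
# abstract model, e.g.
#
#     class report_partner_ledger(osv.AbstractModel):
#         _name = 'report.account.report_partnerledger'
#         _inherit = 'report.abstract_report'
#         _template = 'account.report_partnerledger'
#         _wrapped_report_class = partner_ledger_parser
#
# where partner_ledger_parser is the old-style parser class; render_html()
# above instantiates it and renders _template with its localcontext.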
| agpl-3.0 | 286,134,075,678,085,900 | 42.583333 | 96 | 0.630019 | false |
IntelLabs/numba | numba/cuda/api.py | 3 | 17872 | """
APIs that are reported to numba.cuda
"""
import contextlib
import os
import numpy as np
from .cudadrv import devicearray, devices, driver
from numba.core import config
# NDarray device helper
require_context = devices.require_context
current_context = devices.get_context
gpus = devices.gpus
@require_context
def from_cuda_array_interface(desc, owner=None, sync=True):
"""Create a DeviceNDArray from a cuda-array-interface description.
The ``owner`` is the owner of the underlying memory.
The resulting DeviceNDArray will acquire a reference from it.
If ``sync`` is ``True``, then the imported stream (if present) will be
synchronized.
"""
version = desc.get('version')
# Mask introduced in version 1
if 1 <= version:
mask = desc.get('mask')
# Would ideally be better to detect if the mask is all valid
if mask is not None:
raise NotImplementedError('Masked arrays are not supported')
shape = desc['shape']
strides = desc.get('strides')
dtype = np.dtype(desc['typestr'])
shape, strides, dtype = _prepare_shape_strides_dtype(
shape, strides, dtype, order='C')
size = driver.memory_size_from_info(shape, strides, dtype.itemsize)
devptr = driver.get_devptr_for_active_ctx(desc['data'][0])
data = driver.MemoryPointer(
current_context(), devptr, size=size, owner=owner)
stream_ptr = desc.get('stream', None)
if stream_ptr is not None:
stream = external_stream(stream_ptr)
if sync and config.CUDA_ARRAY_INTERFACE_SYNC:
stream.synchronize()
else:
stream = 0 # No "Numba default stream", not the CUDA default stream
da = devicearray.DeviceNDArray(shape=shape, strides=strides,
dtype=dtype, gpu_data=data,
stream=stream)
return da
def as_cuda_array(obj, sync=True):
"""Create a DeviceNDArray from any object that implements
the :ref:`cuda array interface <cuda-array-interface>`.
A view of the underlying GPU buffer is created. No copying of the data
is done. The resulting DeviceNDArray will acquire a reference from `obj`.
If ``sync`` is ``True``, then the imported stream (if present) will be
synchronized.
"""
if not is_cuda_array(obj):
raise TypeError("*obj* doesn't implement the cuda array interface.")
else:
return from_cuda_array_interface(obj.__cuda_array_interface__,
owner=obj, sync=sync)
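# Illustrative usage sketch (assumes a CUDA-capable GPU; not executed here):
# any object exposing __cuda_array_interface__, e.g. another DeviceNDArray or
# an array from an interoperating library, can be viewed without a copy.
#
#     import numpy as np
#     from numba import cuda
#
#     d_a = cuda.to_device(np.arange(10))
#     view = cuda.as_cuda_array(d_a)   # shares d_a's device memory
#     host = view.copy_to_host()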
def is_cuda_array(obj):
"""Test if the object has defined the `__cuda_array_interface__` attribute.
Does not verify the validity of the interface.
"""
return hasattr(obj, '__cuda_array_interface__')
@require_context
def to_device(obj, stream=0, copy=True, to=None):
"""to_device(obj, stream=0, copy=True, to=None)
Allocate and transfer a numpy ndarray or structured scalar to the device.
To copy host->device a numpy array::
ary = np.arange(10)
d_ary = cuda.to_device(ary)
To enqueue the transfer to a stream::
stream = cuda.stream()
d_ary = cuda.to_device(ary, stream=stream)
The resulting ``d_ary`` is a ``DeviceNDArray``.
To copy device->host::
hary = d_ary.copy_to_host()
To copy device->host to an existing array::
ary = np.empty(shape=d_ary.shape, dtype=d_ary.dtype)
d_ary.copy_to_host(ary)
To enqueue the transfer to a stream::
hary = d_ary.copy_to_host(stream=stream)
"""
if to is None:
to, new = devicearray.auto_device(obj, stream=stream, copy=copy,
user_explicit=True)
return to
if copy:
to.copy_to_device(obj, stream=stream)
return to
@require_context
def device_array(shape, dtype=np.float_, strides=None, order='C', stream=0):
"""device_array(shape, dtype=np.float_, strides=None, order='C', stream=0)
Allocate an empty device ndarray. Similar to :meth:`numpy.empty`.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype,
stream=stream)
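# Illustrative usage sketch (assumes a CUDA-capable GPU; not executed here):
# allocate uninitialized device memory for a kernel to fill, then copy back.
#
#     import numpy as np
#     from numba import cuda
#
#     d_out = cuda.device_array((1000,), dtype=np.float32)
#     # ... launch a kernel that writes into d_out ...
#     out = d_out.copy_to_host()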
@require_context
def managed_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
attach_global=True):
"""managed_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
attach_global=True)
Allocate a np.ndarray with a buffer that is managed.
Similar to np.empty().
Managed memory is supported on Linux, and is considered experimental on
Windows.
:param attach_global: A flag indicating whether to attach globally. Global
attachment implies that the memory is accessible from
any stream on any device. If ``False``, attachment is
*host*, and memory is only accessible by devices
with Compute Capability 6.0 and later.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
buffer = current_context().memallocmanaged(bytesize,
attach_global=attach_global)
npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
managedview = np.ndarray.view(npary, type=devicearray.ManagedNDArray)
managedview.device_setup(buffer, stream=stream)
return managedview
@require_context
def pinned_array(shape, dtype=np.float_, strides=None, order='C'):
"""pinned_array(shape, dtype=np.float_, strides=None, order='C')
Allocate an :class:`ndarray <numpy.ndarray>` with a buffer that is pinned
(pagelocked). Similar to :func:`np.empty() <numpy.empty>`.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides,
dtype.itemsize)
buffer = current_context().memhostalloc(bytesize)
return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
@require_context
def mapped_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
portable=False, wc=False):
"""mapped_array(shape, dtype=np.float_, strides=None, order='C', stream=0,
portable=False, wc=False)
Allocate a mapped ndarray with a buffer that is pinned and mapped on
to the device. Similar to np.empty()
:param portable: a boolean flag to allow the allocated device memory to be
usable in multiple devices.
    :param wc: a boolean flag to enable writecombined allocation which is faster
               to write by the host and to read by the device, but slower to
               read by the host and slower to write by the device.
"""
shape, strides, dtype = _prepare_shape_strides_dtype(shape, strides, dtype,
order)
bytesize = driver.memory_size_from_info(shape, strides, dtype.itemsize)
buffer = current_context().memhostalloc(bytesize, mapped=True)
npary = np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order,
buffer=buffer)
mappedview = np.ndarray.view(npary, type=devicearray.MappedNDArray)
mappedview.device_setup(buffer, stream=stream)
return mappedview
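# Illustrative usage sketch (assumes the device supports mapped pinned memory;
# `some_kernel` and its launch configuration are placeholders, not part of
# this module):
#
#     from numba import cuda
#     import numpy as np
#
#     m = cuda.mapped_array(1024, dtype=np.float32)
#     m[:] = 1.0                  # written by the host
#     some_kernel[64, 128](m)     # the kernel addresses the same buffer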
@contextlib.contextmanager
@require_context
def open_ipc_array(handle, shape, dtype, strides=None, offset=0):
"""
A context manager that opens a IPC *handle* (*CUipcMemHandle*) that is
represented as a sequence of bytes (e.g. *bytes*, tuple of int)
and represent it as an array of the given *shape*, *strides* and *dtype*.
The *strides* can be omitted. In that case, it is assumed to be a 1D
C contiguous array.
Yields a device array.
The IPC handle is closed automatically when context manager exits.
"""
dtype = np.dtype(dtype)
# compute size
size = np.prod(shape) * dtype.itemsize
# manually recreate the IPC mem handle
handle = driver.drvapi.cu_ipc_mem_handle(*handle)
# use *IpcHandle* to open the IPC memory
ipchandle = driver.IpcHandle(None, handle, size, offset=offset)
yield ipchandle.open_array(current_context(), shape=shape,
strides=strides, dtype=dtype)
ipchandle.close()
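# Illustrative sketch of the consumer side (the handle, shape and dtype are
# assumed to have been sent over from the producing process, e.g. via
# multiprocessing; not executed here):
#
#     with cuda.open_ipc_array(handle, shape=(16,), dtype=np.float64) as d_ary:
#         data = d_ary.copy_to_host()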
def synchronize():
"Synchronize the current context."
return current_context().synchronize()
def _prepare_shape_strides_dtype(shape, strides, dtype, order):
dtype = np.dtype(dtype)
if isinstance(shape, int):
shape = (shape,)
if isinstance(strides, int):
strides = (strides,)
else:
strides = strides or _fill_stride_by_order(shape, dtype, order)
return shape, strides, dtype
def _fill_stride_by_order(shape, dtype, order):
nd = len(shape)
if nd == 0:
return ()
strides = [0] * nd
if order == 'C':
strides[-1] = dtype.itemsize
for d in reversed(range(nd - 1)):
strides[d] = strides[d + 1] * shape[d + 1]
elif order == 'F':
strides[0] = dtype.itemsize
for d in range(1, nd):
strides[d] = strides[d - 1] * shape[d - 1]
else:
raise ValueError('must be either C/F order')
return tuple(strides)
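# Worked example of the helper above: for shape (2, 3) and float32
# (itemsize 4), 'C' order yields strides (12, 4) -- the last axis is
# contiguous -- while 'F' order yields (4, 8) with the first axis contiguous.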
def _contiguous_strides_like_array(ary):
"""
Given an array, compute strides for a new contiguous array of the same
shape.
"""
# Don't recompute strides if the default strides will be sufficient to
# create a contiguous array.
if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
return None
# Otherwise, we need to compute new strides using an algorithm adapted from
# NumPy v1.17.4's PyArray_NewLikeArrayWithShape in
# core/src/multiarray/ctors.c. We permute the strides in ascending order
# then compute the stride for the dimensions with the same permutation.
# Stride permutation. E.g. a stride array (4, -2, 12) becomes
# [(1, -2), (0, 4), (2, 12)]
strideperm = [ x for x in enumerate(ary.strides) ]
strideperm.sort(key=lambda x: x[1])
# Compute new strides using permutation
strides = [0] * len(ary.strides)
stride = ary.dtype.itemsize
for i_perm, _ in strideperm:
strides[i_perm] = stride
stride *= ary.shape[i_perm]
return tuple(strides)
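# Worked example of the stride permutation above: a float32 array of shape
# (4, 3) with strides (4, 32) -- e.g. a transposed, sliced view -- is neither
# C nor F contiguous.  Sorting axes by stride gives order (0, 1); rebuilding
# contiguously yields strides (4, 16), a compact buffer that keeps axis 0
# fastest-varying, matching the source layout instead of forcing C order.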
def _order_like_array(ary):
if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
return 'F'
else:
return 'C'
def device_array_like(ary, stream=0):
"""
Call :func:`device_array() <numba.cuda.device_array>` with information from
the array.
"""
strides = _contiguous_strides_like_array(ary)
order = _order_like_array(ary)
return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
order=order, stream=stream)
def mapped_array_like(ary, stream=0, portable=False, wc=False):
"""
Call :func:`mapped_array() <numba.cuda.mapped_array>` with the information
from the array.
"""
strides = _contiguous_strides_like_array(ary)
order = _order_like_array(ary)
return mapped_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
order=order, stream=stream, portable=portable, wc=wc)
def pinned_array_like(ary):
"""
Call :func:`pinned_array() <numba.cuda.pinned_array>` with the information
from the array.
"""
strides = _contiguous_strides_like_array(ary)
order = _order_like_array(ary)
return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
order=order)
# Stream helper
@require_context
def stream():
"""
Create a CUDA stream that represents a command queue for the device.
"""
return current_context().create_stream()
@require_context
def default_stream():
"""
Get the default CUDA stream. CUDA semantics in general are that the default
stream is either the legacy default stream or the per-thread default stream
depending on which CUDA APIs are in use. In Numba, the APIs for the legacy
default stream are always the ones in use, but an option to use APIs for
the per-thread default stream may be provided in future.
"""
return current_context().get_default_stream()
@require_context
def legacy_default_stream():
"""
Get the legacy default CUDA stream.
"""
return current_context().get_legacy_default_stream()
@require_context
def per_thread_default_stream():
"""
Get the per-thread default CUDA stream.
"""
return current_context().get_per_thread_default_stream()
@require_context
def external_stream(ptr):
"""Create a Numba stream object for a stream allocated outside Numba.
:param ptr: Pointer to the external stream to wrap in a Numba Stream
:type ptr: int
"""
return current_context().create_external_stream(ptr)
# Page lock
@require_context
@contextlib.contextmanager
def pinned(*arylist):
"""A context manager for temporary pinning a sequence of host ndarrays.
"""
pmlist = []
for ary in arylist:
pm = current_context().mempin(ary, driver.host_pointer(ary),
driver.host_memory_size(ary),
mapped=False)
pmlist.append(pm)
yield
@require_context
@contextlib.contextmanager
def mapped(*arylist, **kws):
"""A context manager for temporarily mapping a sequence of host ndarrays.
"""
assert not kws or 'stream' in kws, "Only accept 'stream' as keyword."
stream = kws.get('stream', 0)
pmlist = []
devarylist = []
for ary in arylist:
pm = current_context().mempin(ary, driver.host_pointer(ary),
driver.host_memory_size(ary),
mapped=True)
pmlist.append(pm)
devary = devicearray.from_array_like(ary, gpu_data=pm, stream=stream)
devarylist.append(devary)
try:
if len(devarylist) == 1:
yield devarylist[0]
else:
yield devarylist
finally:
# When exiting from `with cuda.mapped(*arrs) as mapped_arrs:`, the name
# `mapped_arrs` stays in scope, blocking automatic unmapping based on
# reference count. We therefore invoke the finalizer manually.
for pm in pmlist:
pm.free()
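# Illustrative usage sketch (`some_kernel` and its launch configuration are
# placeholders; assumes a CUDA-capable device):
#
#     ary = np.zeros(1024, dtype=np.float32)
#     with cuda.mapped(ary) as d_ary:
#         some_kernel[64, 16](d_ary)
#     # ary now holds whatever the kernel wrote through the mapping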
def event(timing=True):
"""
Create a CUDA event. Timing data is only recorded by the event if it is
created with ``timing=True``.
"""
evt = current_context().create_event(timing=timing)
return evt
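# Illustrative timing sketch (`some_kernel`, its launch configuration and
# `d_data` are placeholders; assumes a CUDA-capable device):
#
#     start = cuda.event(timing=True)
#     stop = cuda.event(timing=True)
#     start.record()
#     some_kernel[blocks, threads](d_data)
#     stop.record()
#     stop.synchronize()
#     elapsed_ms = cuda.event_elapsed_time(start, stop)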
event_elapsed_time = driver.event_elapsed_time
# Device selection
def select_device(device_id):
"""
Make the context associated with device *device_id* the current context.
Returns a Device instance.
Raises exception on error.
"""
context = devices.get_context(device_id)
return context.device
def get_current_device():
"Get current device associated with the current thread"
return current_context().device
def list_devices():
"Return a list of all detected devices"
return devices.gpus
def close():
"""
Explicitly clears all contexts in the current thread, and destroys all
contexts if the current thread is the main thread.
"""
devices.reset()
def _auto_device(ary, stream=0, copy=True):
return devicearray.auto_device(ary, stream=stream, copy=copy)
def detect():
"""
Detect supported CUDA hardware and print a summary of the detected hardware.
Returns a boolean indicating whether any supported devices were detected.
"""
devlist = list_devices()
print('Found %d CUDA devices' % len(devlist))
supported_count = 0
for dev in devlist:
attrs = []
cc = dev.compute_capability
kernel_timeout = dev.KERNEL_EXEC_TIMEOUT
tcc = dev.TCC_DRIVER
fp32_to_fp64_ratio = dev.SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO
attrs += [('Compute Capability', '%d.%d' % cc)]
attrs += [('PCI Device ID', dev.PCI_DEVICE_ID)]
attrs += [('PCI Bus ID', dev.PCI_BUS_ID)]
attrs += [('Watchdog', 'Enabled' if kernel_timeout else 'Disabled')]
if os.name == "nt":
attrs += [('Compute Mode', 'TCC' if tcc else 'WDDM')]
attrs += [('FP32/FP64 Performance Ratio', fp32_to_fp64_ratio)]
if cc < (2, 0):
support = '[NOT SUPPORTED: CC < 2.0]'
else:
support = '[SUPPORTED]'
supported_count += 1
print('id %d %20s %40s' % (dev.id, dev.name, support))
for key, val in attrs:
print('%40s: %s' % (key, val))
print('Summary:')
print('\t%d/%d devices are supported' % (supported_count, len(devlist)))
return supported_count > 0
@contextlib.contextmanager
def defer_cleanup():
"""
Temporarily disable memory deallocation.
Use this to prevent resource deallocation breaking asynchronous execution.
For example::
with defer_cleanup():
# all cleanup is deferred in here
do_speed_critical_code()
# cleanup can occur here
Note: this context manager can be nested.
"""
with current_context().defer_cleanup():
yield
profiling = require_context(driver.profiling)
profile_start = require_context(driver.profile_start)
profile_stop = require_context(driver.profile_stop)
| bsd-2-clause | 926,152,474,975,385,100 | 32.219331 | 80 | 0.630651 | false |
spjmurray/openstack-sentinel | sentinel/api/controllers/image/v2/schemas.py | 1 | 1399 | # Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import pecan.decorators
from sentinel.api.controllers.base import BaseController
class ImageV2SchemasImageController(BaseController):
@pecan.expose('json')
def get(self):
# Rather than return a schema resource (e.g. {"schema": {}}), glance
# returns a raw blob of JSON, simply unmarshal from its container
# bypassing all format_* methods and let pecan render
return self.image.schemas.get('image').raw()
class ImageV2SchemasController(object):
# Don't be tempted to make this a RestController and have a
# get(self, target) handler, one of the targets happens to
# be 'image' which the routing picks up as the image client!
def __init__(self):
self.image = ImageV2SchemasImageController()
# vi: ts=4 et:
| apache-2.0 | 4,468,057,246,812,892,700 | 33.975 | 78 | 0.711937 | false |
Lektorium-LLC/edx-platform | lms/djangoapps/certificates/tests/test_cert_management.py | 5 | 10678 | """Tests for the resubmit_error_certificates management command. """
import ddt
from django.core.management.base import CommandError
from django.test.utils import override_settings
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locator import CourseLocator
from badges.events.course_complete import get_completion_badge
from badges.models import BadgeAssertion
from badges.tests.factories import BadgeAssertionFactory, CourseCompleteImageConfigurationFactory
from certificates.management.commands import regenerate_user, resubmit_error_certificates, ungenerated_certs
from certificates.models import CertificateStatuses, GeneratedCertificate
from course_modes.models import CourseMode
from lms.djangoapps.grades.tests.utils import mock_passing_grade
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
class CertificateManagementTest(ModuleStoreTestCase):
"""
Base test class for Certificate Management command tests.
"""
# Override with the command module you wish to test.
command = resubmit_error_certificates
def setUp(self):
super(CertificateManagementTest, self).setUp()
self.user = UserFactory.create()
self.courses = [
CourseFactory.create()
for __ in range(3)
]
for course in self.courses:
chapter = ItemFactory.create(parent_location=course.location)
ItemFactory.create(parent_location=chapter.location, category='sequential', graded=True)
CourseCompleteImageConfigurationFactory.create()
def _create_cert(self, course_key, user, status, mode=CourseMode.HONOR):
"""Create a certificate entry. """
# Enroll the user in the course
CourseEnrollmentFactory.create(
user=user,
course_id=course_key,
mode=mode
)
# Create the certificate
GeneratedCertificate.eligible_certificates.create(
user=user,
course_id=course_key,
status=status
)
def _run_command(self, *args, **kwargs):
"""Run the management command to generate a fake cert. """
command = self.command.Command()
return command.handle(*args, **kwargs)
def _assert_cert_status(self, course_key, user, expected_status):
"""Check the status of a certificate. """
cert = GeneratedCertificate.eligible_certificates.get(user=user, course_id=course_key)
self.assertEqual(cert.status, expected_status)
@attr(shard=1)
@ddt.ddt
class ResubmitErrorCertificatesTest(CertificateManagementTest):
"""Tests for the resubmit_error_certificates management command. """
ENABLED_SIGNALS = ['course_published']
@ddt.data(CourseMode.HONOR, CourseMode.VERIFIED)
def test_resubmit_error_certificate(self, mode):
# Create a certificate with status 'error'
self._create_cert(self.courses[0].id, self.user, CertificateStatuses.error, mode)
# Re-submit all certificates with status 'error'
with check_mongo_calls(1):
self._run_command()
# Expect that the certificate was re-submitted
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
def test_resubmit_error_certificate_in_a_course(self):
# Create a certificate with status 'error'
# in three courses.
for idx in range(3):
self._create_cert(self.courses[idx].id, self.user, CertificateStatuses.error)
# Re-submit certificates for two of the courses
self._run_command(course_key_list=[
unicode(self.courses[0].id),
unicode(self.courses[1].id)
])
# Expect that the first two courses have been re-submitted,
# but not the third course.
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
self._assert_cert_status(self.courses[1].id, self.user, CertificateStatuses.notpassing)
self._assert_cert_status(self.courses[2].id, self.user, CertificateStatuses.error)
@ddt.data(
CertificateStatuses.deleted,
CertificateStatuses.deleting,
CertificateStatuses.downloadable,
CertificateStatuses.generating,
CertificateStatuses.notpassing,
CertificateStatuses.restricted,
CertificateStatuses.unavailable,
)
def test_resubmit_error_certificate_skips_non_error_certificates(self, other_status):
# Create certificates with an error status and some other status
self._create_cert(self.courses[0].id, self.user, CertificateStatuses.error)
self._create_cert(self.courses[1].id, self.user, other_status)
# Re-submit certificates for all courses
self._run_command()
# Only the certificate with status "error" should have been re-submitted
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.notpassing)
self._assert_cert_status(self.courses[1].id, self.user, other_status)
def test_resubmit_error_certificate_none_found(self):
self._create_cert(self.courses[0].id, self.user, CertificateStatuses.downloadable)
self._run_command()
self._assert_cert_status(self.courses[0].id, self.user, CertificateStatuses.downloadable)
def test_course_caching(self):
# Create multiple certificates for the same course
self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
self._create_cert(self.courses[0].id, UserFactory.create(), CertificateStatuses.error)
# Verify that we make only one Mongo query
# because the course is cached.
with check_mongo_calls(1):
self._run_command()
def test_invalid_course_key(self):
invalid_key = u"invalid/"
with self.assertRaisesRegexp(CommandError, invalid_key):
self._run_command(course_key_list=[invalid_key])
def test_course_does_not_exist(self):
phantom_course = CourseLocator(org='phantom', course='phantom', run='phantom')
self._create_cert(phantom_course, self.user, 'error')
self._run_command()
# Expect that the certificate was NOT resubmitted
# since the course doesn't actually exist.
self._assert_cert_status(phantom_course, self.user, CertificateStatuses.error)
@ddt.ddt
@attr(shard=1)
class RegenerateCertificatesTest(CertificateManagementTest):
"""
Tests for regenerating certificates.
"""
command = regenerate_user
def setUp(self):
"""
We just need one course here.
"""
super(RegenerateCertificatesTest, self).setUp()
self.course = self.courses[0]
@ddt.data(True, False)
@override_settings(CERT_QUEUE='test-queue')
@patch('certificates.api.XQueueCertInterface', spec=True)
def test_clear_badge(self, issue_badges, xqueue):
"""
Given that I have a user with a badge
If I run regeneration for a user
Then certificate generation will be requested
And the badge will be deleted if badge issuing is enabled
"""
key = self.course.location.course_key
self._create_cert(key, self.user, CertificateStatuses.downloadable)
badge_class = get_completion_badge(key, self.user)
BadgeAssertionFactory(badge_class=badge_class, user=self.user)
self.assertTrue(BadgeAssertion.objects.filter(user=self.user, badge_class=badge_class))
self.course.issue_badges = issue_badges
self.store.update_item(self.course, None)
self._run_command(
username=self.user.email, course=unicode(key), noop=False, insecure=False, template_file=None,
grade_value=None
)
xqueue.return_value.regen_cert.assert_called_with(
self.user,
key,
course=self.course,
forced_grade=None,
template_file=None,
generate_pdf=True
)
self.assertEquals(
bool(BadgeAssertion.objects.filter(user=self.user, badge_class=badge_class)), not issue_badges
)
@override_settings(CERT_QUEUE='test-queue')
@patch('capa.xqueue_interface.XQueueInterface.send_to_queue', spec=True)
def test_regenerating_certificate(self, mock_send_to_queue):
"""
        Given that I have a user who has not passed the course
        If I run regeneration for that user
        Then certificate generation will not be requested
"""
key = self.course.location.course_key
self._create_cert(key, self.user, CertificateStatuses.downloadable)
self._run_command(
username=self.user.email, course=unicode(key), noop=False, insecure=True, template_file=None,
grade_value=None
)
certificate = GeneratedCertificate.eligible_certificates.get(
user=self.user,
course_id=key
)
self.assertEqual(certificate.status, CertificateStatuses.notpassing)
self.assertFalse(mock_send_to_queue.called)
@attr(shard=1)
class UngenerateCertificatesTest(CertificateManagementTest):
"""
Tests for generating certificates.
"""
command = ungenerated_certs
def setUp(self):
"""
We just need one course here.
"""
super(UngenerateCertificatesTest, self).setUp()
self.course = self.courses[0]
@override_settings(CERT_QUEUE='test-queue')
@patch('capa.xqueue_interface.XQueueInterface.send_to_queue', spec=True)
def test_ungenerated_certificate(self, mock_send_to_queue):
"""
        Given that I have an ended course
        If I run the ungenerated certs command
        Then certificates should be generated for all users who passed the course
"""
mock_send_to_queue.return_value = (0, "Successfully queued")
key = self.course.location.course_key
self._create_cert(key, self.user, CertificateStatuses.unavailable)
with mock_passing_grade():
self._run_command(
course=unicode(key), noop=False, insecure=True, force=False
)
self.assertTrue(mock_send_to_queue.called)
certificate = GeneratedCertificate.eligible_certificates.get(
user=self.user,
course_id=key
)
self.assertEqual(certificate.status, CertificateStatuses.generating)
| agpl-3.0 | -184,744,424,829,223,230 | 40.227799 | 108 | 0.678498 | false |
alkyl1978/gnuradio | gr-fec/python/fec/extended_tagged_encoder.py | 23 | 3264 | #!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
import fec_swig as fec
from bitflip import read_bitlist
class extended_tagged_encoder(gr.hier_block2):
def __init__(self, encoder_obj_list, puncpat=None, lentagname=None):
gr.hier_block2.__init__(self, "extended_tagged_encoder",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_char))
self.blocks=[]
self.puncpat=puncpat
# If it's a list of encoders, take the first one, unless it's
# a list of lists of encoders.
if(type(encoder_obj_list) == list):
# This block doesn't handle parallelism of > 1
# We could just grab encoder [0][0], but we don't want to encourage this.
if(type(encoder_obj_list[0]) == list):
gr.log.info("fec.extended_tagged_encoder: Parallelism must be 0 or 1.")
raise AttributeError
encoder_obj = encoder_obj_list[0]
# Otherwise, just take it as is
else:
encoder_obj = encoder_obj_list
# If lentagname is None, fall back to using the non tagged
# stream version
if type(lentagname) == str:
if(lentagname.lower() == 'none'):
lentagname = None
if fec.get_encoder_input_conversion(encoder_obj) == "pack":
self.blocks.append(blocks.pack_k_bits_bb(8))
if(not lentagname):
self.blocks.append(fec.encoder(encoder_obj,
gr.sizeof_char,
gr.sizeof_char))
else:
self.blocks.append(fec.tagged_encoder(encoder_obj,
gr.sizeof_char,
gr.sizeof_char,
lentagname))
if self.puncpat != '11':
self.blocks.append(fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))
# Connect the input to the encoder and the output to the
# puncture if used or the encoder if not.
self.connect((self, 0), (self.blocks[0], 0));
self.connect((self.blocks[-1], 0), (self, 0));
# If using the puncture block, add it into the flowgraph after
# the encoder.
for i in range(len(self.blocks) - 1):
self.connect((self.blocks[i], 0), (self.blocks[i+1], 0));
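# Illustrative usage sketch: `encoder_obj` stands for any FECAPI encoder
# object (e.g. one returned by a fec.*_encoder_make factory); the length-tag
# name and the surrounding source/sink blocks are placeholders for the
# example.
#
#     block = extended_tagged_encoder(encoder_obj, puncpat='11',
#                                     lentagname='packet_len')
#     tb = gr.top_block()
#     tb.connect(byte_source, block, byte_sink)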
| gpl-3.0 | 2,625,645,992,004,734,500 | 38.804878 | 87 | 0.584865 | false |
ArseniyK/Sunflower | application/gui/preferences/toolbar.py | 9 | 7705 | import gtk
from widgets.settings_page import SettingsPage
class Column:
NAME = 0
DESCRIPTION = 1
TYPE = 2
ICON = 3
class ToolbarOptions(SettingsPage):
"""Toolbar options extension class"""
def __init__(self, parent, application):
SettingsPage.__init__(self, parent, application, 'toolbar', _('Toolbar'))
self._toolbar_manager = self._application.toolbar_manager
# create list box
container = gtk.ScrolledWindow()
container.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
container.set_shadow_type(gtk.SHADOW_IN)
self._store = gtk.ListStore(str, str, str, str)
self._list = gtk.TreeView()
self._list.set_model(self._store)
cell_icon = gtk.CellRendererPixbuf()
cell_name = gtk.CellRendererText()
cell_type = gtk.CellRendererText()
# create name column
col_name = gtk.TreeViewColumn(_('Name'))
col_name.set_min_width(200)
col_name.set_resizable(True)
# pack and configure renderes
col_name.pack_start(cell_icon, False)
col_name.pack_start(cell_name, True)
col_name.add_attribute(cell_icon, 'icon-name', Column.ICON)
col_name.add_attribute(cell_name, 'text', Column.NAME)
# create type column
col_type = gtk.TreeViewColumn(_('Type'), cell_type, markup=Column.DESCRIPTION)
col_type.set_resizable(True)
col_type.set_expand(True)
# add columns to the list
self._list.append_column(col_name)
self._list.append_column(col_type)
container.add(self._list)
# create controls
button_box = gtk.HBox(False, 5)
button_add = gtk.Button(stock=gtk.STOCK_ADD)
button_add.connect('clicked', self._add_widget)
button_delete = gtk.Button(stock=gtk.STOCK_DELETE)
button_delete.connect('clicked', self._delete_widget)
button_edit = gtk.Button(stock=gtk.STOCK_EDIT)
button_edit.connect('clicked', self._edit_widget)
image_up = gtk.Image()
image_up.set_from_stock(gtk.STOCK_GO_UP, gtk.ICON_SIZE_BUTTON)
button_move_up = gtk.Button(label=None)
button_move_up.add(image_up)
button_move_up.set_tooltip_text(_('Move Up'))
button_move_up.connect('clicked', self._move_widget, -1)
image_down = gtk.Image()
image_down.set_from_stock(gtk.STOCK_GO_DOWN, gtk.ICON_SIZE_BUTTON)
button_move_down = gtk.Button(label=None)
button_move_down.add(image_down)
button_move_down.set_tooltip_text(_('Move Down'))
button_move_down.connect('clicked', self._move_widget, 1)
# pack ui
button_box.pack_start(button_add, False, False, 0)
button_box.pack_start(button_delete, False, False, 0)
button_box.pack_start(button_edit, False, False, 0)
button_box.pack_end(button_move_down, False, False, 0)
button_box.pack_end(button_move_up, False, False, 0)
# toolbar style
label_style = gtk.Label(_('Toolbar style:'))
list_styles = gtk.ListStore(str, int)
list_styles.append((_('Icons'), gtk.TOOLBAR_ICONS))
list_styles.append((_('Text'), gtk.TOOLBAR_TEXT))
list_styles.append((_('Both'), gtk.TOOLBAR_BOTH))
list_styles.append((_('Both horizontal'), gtk.TOOLBAR_BOTH_HORIZ))
renderer = gtk.CellRendererText()
self._combobox_styles = gtk.ComboBox(list_styles)
self._combobox_styles.pack_start(renderer)
self._combobox_styles.add_attribute(renderer, 'text', 0)
self._combobox_styles.connect('changed', self._parent.enable_save)
# toolbar icon size
label_icon_size = gtk.Label(_('Icon size:'))
list_icon_size = gtk.ListStore(str, int)
list_icon_size.append((_('Same as menu item'), gtk.ICON_SIZE_MENU))
list_icon_size.append((_('Small toolbar icon'), gtk.ICON_SIZE_SMALL_TOOLBAR))
list_icon_size.append((_('Large toolbar icon'), gtk.ICON_SIZE_LARGE_TOOLBAR))
list_icon_size.append((_('Same as buttons'), gtk.ICON_SIZE_BUTTON))
list_icon_size.append((_('Same as drag icons'), gtk.ICON_SIZE_DND))
list_icon_size.append((_('Same as dialog'), gtk.ICON_SIZE_DIALOG))
renderer = gtk.CellRendererText()
self._combobox_icon_size = gtk.ComboBox(list_icon_size)
self._combobox_icon_size.pack_start(renderer)
self._combobox_icon_size.add_attribute(renderer, 'text', 0)
self._combobox_icon_size.connect('changed', self._parent.enable_save)
style_box = gtk.HBox(False, 5)
style_box.pack_start(label_style, False, False, 0)
style_box.pack_start(self._combobox_styles, False, False, 0)
size_box = gtk.HBox(False, 5)
size_box.pack_start(label_icon_size, False, False, 0)
size_box.pack_start(self._combobox_icon_size, False, False, 0)
self.pack_start(style_box, False, False, 0)
self.pack_start(size_box, False, False, 0)
self.pack_start(container, True, True, 0)
self.pack_start(button_box, False, False, 0)
def _add_widget(self, widget, data=None):
"""Show dialog for creating toolbar widget"""
widget_added = self._toolbar_manager.show_create_widget_dialog(self._parent)
if widget_added:
			# reload configuration file
self._load_options()
# enable save button
self._parent.enable_save()
def _delete_widget(self, widget, data=None):
"""Delete selected toolbar widget"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
# remove item from list
list_.remove(iter_)
# enable save button if item was removed
self._parent.enable_save()
def _edit_widget(self, widget, data=None):
"""Edit selected toolbar widget"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
name = list_.get_value(iter_, 0)
widget_type = list_.get_value(iter_, 2)
edited = self._toolbar_manager.show_configure_widget_dialog(
name,
widget_type,
self._parent
)
# enable save button
if edited:
self._parent.enable_save()
def _move_widget(self, widget, direction):
"""Move selected bookmark up"""
selection = self._list.get_selection()
list_, iter_ = selection.get_selected()
if iter_ is not None:
# get iter index
index = list_.get_path(iter_)[0]
# depending on direction, swap iters
if (direction == -1 and index > 0) \
or (direction == 1 and index < len(list_) - 1):
list_.swap(iter_, list_[index + direction].iter)
# enable save button if iters were swapped
self._parent.enable_save()
def _load_options(self):
"""Load options from file"""
options = self._application.toolbar_options
self._combobox_styles.set_active(options.get('style'))
self._combobox_icon_size.set_active(options.get('icon_size'))
# clear list store
self._store.clear()
for name in options.get_sections():
section = options.section(name)
widget_type = section.get('type')
data = self._toolbar_manager.get_widget_data(widget_type)
if data is not None:
icon = data[1]
description = data[0]
else: # failsafe, display raw widget type
icon = ''
description = '{0} <small><i>({1})</i></small>'.format(widget_type, _('missing plugin'))
self._store.append((name, description, widget_type, icon))
def _save_options(self):
"""Save settings to config file"""
options = self._application.toolbar_options
options.set('style', self._combobox_styles.get_active())
options.set('icon_size', self._combobox_icon_size.get_active())
# get section list, we'll use this
# list to remove orphan configurations
section_list = options.get_sections()
# get list from configuration window
new_list = []
for data in self._store:
new_list.append(data[Column.NAME])
# get only sections for removal
sections_to_remove = filter(lambda name: name not in new_list, section_list)
map(lambda name: options.remove_section(name), sections_to_remove)
| gpl-3.0 | 5,710,118,001,502,252,000 | 31.37395 | 92 | 0.678131 | false |
simobasso/ansible | lib/ansible/parsing/splitter.py | 9 | 10866 | # (c) 2014 James Cammarata, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansible.errors import AnsibleError
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
### FIXME: args should already be a unicode string
from ansible.utils.unicode import to_unicode
args = to_unicode(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
raw_params = []
for x in vargs:
x = _decode_escapes(x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# only internal variables can start with an underscore, so
                # we don't allow users to set them directly in arguments
if k.startswith('_'):
raise AnsibleError("invalid parameter specified: '%s'" % k)
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
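# Illustrative sketch only (not part of the original module); the argument
# strings below are hypothetical:
#
#   parse_kv(u'src=/tmp/a dest=/tmp/b mode=0644')
#   -> {u'src': u'/tmp/a', u'dest': u'/tmp/b', u'mode': u'0644'}
#
#   parse_kv(u'echo hello creates=/tmp/done', check_raw=True)
#   -> {u'creates': u'/tmp/done', u'_raw_params': u'echo hello'}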
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
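# Illustrative behaviour of the quote tracking above (example tokens only):
#   _get_quote_state('a="foo', None)   -> '"'    still inside double quotes
#   _get_quote_state('bar"', '"')      -> None   the open quote was closed
#   _get_quote_state('a=\\"x', None)   -> None   escaped quotes are ignored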
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
    Basically this is a variation of shlex that has some more intelligence
    about how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx,item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx,token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
params[-1] = "%s\n%s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
return params
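# Illustrative behaviour (example strings only):
#   split_args('a=b c="foo bar"')         -> ['a=b', 'c="foo bar"']
#   split_args('msg={{ item | upper }}')  -> ['msg={{ item | upper }}']
# Tokens that were split inside quotes or inside {{ }}, {% %} or {# #} blocks
# are re-joined, and unbalanced quotes or blocks raise an exception.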
| gpl-3.0 | -4,722,345,781,369,629,000 | 39.849624 | 120 | 0.597368 | false |
Crypto-Expert/Electrum-obsolete | lib/verifier.py | 4 | 5843 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading, time, Queue, os, sys, shutil
from util import user_dir, appdata_dir, print_error
from bitcoin import *
class TxVerifier(threading.Thread):
""" Simple Payment Verification """
def __init__(self, network, storage):
threading.Thread.__init__(self)
self.daemon = True
self.storage = storage
self.network = network
self.blockchain = network.blockchain
self.transactions = {} # requested verifications (with height sent by the requestor)
self.verified_tx = storage.get('verified_tx3',{}) # height, timestamp of verified transactions
self.merkle_roots = storage.get('merkle_roots',{}) # hashed by me
self.lock = threading.Lock()
self.running = False
self.queue = Queue.Queue()
def get_confirmations(self, tx):
""" return the number of confirmations of a monitored transaction. """
with self.lock:
if tx in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx]
conf = (self.blockchain.local_height - height + 1)
if conf <= 0: timestamp = None
elif tx in self.transactions:
conf = -1
timestamp = None
else:
conf = 0
timestamp = None
return conf, timestamp
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
x = self.verified_tx.get(tx_hash)
y = self.transactions.get(tx_hash)
if x:
height, timestamp, pos = x
return height, pos
elif y:
return y, 0
else:
return 1e12, 0
def get_height(self, tx_hash):
with self.lock:
v = self.verified_tx.get(tx_hash)
height = v[0] if v else None
return height
def add(self, tx_hash, tx_height):
""" add a transaction to the list of monitored transactions. """
assert tx_height > 0
with self.lock:
if tx_hash not in self.transactions.keys():
self.transactions[tx_hash] = tx_height
def stop(self):
with self.lock: self.running = False
def is_running(self):
with self.lock: return self.running
def run(self):
with self.lock:
self.running = True
requested_merkle = []
while self.is_running():
# request missing tx
for tx_hash, tx_height in self.transactions.items():
if tx_hash not in self.verified_tx:
if self.merkle_roots.get(tx_hash) is None and tx_hash not in requested_merkle:
if self.network.send([ ('blockchain.transaction.get_merkle',[tx_hash, tx_height]) ], lambda i,r: self.queue.put(r)):
print_error('requesting merkle', tx_hash)
requested_merkle.append(tx_hash)
try:
r = self.queue.get(timeout=1)
except Queue.Empty:
continue
if not r: continue
if r.get('error'):
print_error('Verifier received an error:', r)
continue
# 3. handle response
method = r['method']
params = r['params']
result = r['result']
if method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
self.verify_merkle(tx_hash, result)
requested_merkle.remove(tx_hash)
def verify_merkle(self, tx_hash, result):
tx_height = result.get('block_height')
pos = result.get('pos')
self.merkle_roots[tx_hash] = self.hash_merkle_root(result['merkle'], tx_hash, pos)
header = self.blockchain.read_header(tx_height)
if not header: return
assert header.get('merkle_root') == self.merkle_roots[tx_hash]
# we passed all the tests
timestamp = header.get('timestamp')
with self.lock:
self.verified_tx[tx_hash] = (tx_height, timestamp, pos)
print_error("verified %s"%tx_hash)
self.storage.put('verified_tx3', self.verified_tx, True)
self.network.trigger_callback('updated')
def hash_merkle_root(self, merkle_s, target_hash, pos):
h = hash_decode(target_hash)
for i in range(len(merkle_s)):
item = merkle_s[i]
h = Hash( hash_decode(item) + h ) if ((pos >> i) & 1) else Hash( h + hash_decode(item) )
return hash_encode(h)
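    # Illustrative sketch of the fold above: for pos=5 (binary 101) the proof
    # hashes are combined as
    #   h = Hash(hash_decode(merkle[0]) + h)   # bit 0 set   -> sibling on the left
    #   h = Hash(h + hash_decode(merkle[1]))   # bit 1 clear -> sibling on the right
    #   h = Hash(hash_decode(merkle[2]) + h)   # bit 2 set   -> sibling on the left
    # and the result must match the merkle_root of the block header.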
def undo_verifications(self, height):
with self.lock:
items = self.verified_tx.items()[:]
for tx_hash, item in items:
tx_height, timestamp, pos = item
if tx_height >= height:
print_error("redoing", tx_hash)
with self.lock:
self.verified_tx.pop(tx_hash)
if tx_hash in self.merkle_roots:
self.merkle_roots.pop(tx_hash)
| gpl-3.0 | 2,701,930,159,249,060,000 | 33.779762 | 140 | 0.571624 | false |
beiko-lab/gengis | bin/Lib/distutils/sysconfig.py | 3 | 22882 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <[email protected]>
"""
__revision__ = "$Id$"
import os
import re
import string
import sys
from distutils.errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
project_base = os.path.dirname(os.path.abspath(sys.executable))
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(project_base, "Modules", fn)):
return True
return False
python_build = _python_build()
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
buildir = os.path.dirname(sys.executable)
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
else:
# the source dir is relative to the buildir
srcdir = os.path.abspath(os.path.join(buildir,
get_config_var('srcdir')))
# Include is located in the srcdir
inc_dir = os.path.join(srcdir, "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
_USE_CLANG = None
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO', 'AR',
'ARFLAGS')
newcc = None
if 'CC' in os.environ:
newcc = os.environ['CC']
elif sys.platform == 'darwin' and cc == 'gcc-4.2':
# Issue #13590:
# Since Apple removed gcc-4.2 in Xcode 4.2, we can no
# longer assume it is available for extension module builds.
# If Python was built with gcc-4.2, check first to see if
# it is available on this system; if not, try to use clang
# instead unless the caller explicitly set CC.
global _USE_CLANG
if _USE_CLANG is None:
from distutils import log
from subprocess import Popen, PIPE
p = Popen("! type gcc-4.2 && type clang && exit 2",
shell=True, stdout=PIPE, stderr=PIPE)
p.wait()
if p.returncode == 2:
_USE_CLANG = True
log.warn("gcc-4.2 not found, using clang instead")
else:
_USE_CLANG = False
if _USE_CLANG:
newcc = 'clang'
if newcc:
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(project_base, "PC")
else:
inc_dir = project_base
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
if g is None:
g = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
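# Illustrative sketch of the interpolation above, for a hypothetical Makefile:
#   CC=gcc
#   CFLAGS=-O2
#   ALLFLAGS=$(CFLAGS) -Wall
# parses to {'CC': 'gcc', 'CFLAGS': '-O2', 'ALLFLAGS': '-O2 -Wall'}; variables
# that are never defined in the file fall back to os.environ or "".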
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while 1:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
s = s[0:beg] + vars.get(m.group(1)) + s[end:]
else:
break
return s
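# Illustrative example (hypothetical variable values):
#   expand_makefile_vars('$(CC) -o prog ${OBJS}',
#                        {'CC': 'gcc', 'OBJS': 'a.o b.o'})
#   -> 'gcc -o prog a.o b.o'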
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
g = {}
# load the installed Makefile:
try:
filename = get_makefile_filename()
parse_makefile(filename, g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# load the installed pyconfig.h:
try:
filename = get_config_h_filename()
parse_config_h(file(filename), g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if python_build:
g['LDSHARED'] = g['BLDSHARED']
elif get_python_version() < '2.1':
# The following two branches are for 1.5.2 compatibility.
if sys.platform == 'aix4': # what about AIX 3.x ?
# Linker script is in the config directory, not in Modules as the
# Makefile says.
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
elif sys.platform == 'beos':
# Linker script is in the config directory. In the Makefile it is
# relative to the srcdir, which after installation no longer makes
# sense.
python_lib = get_python_lib(standard_lib=1)
linkerscript_path = string.split(g['LDSHARED'])[0]
linkerscript_name = os.path.basename(linkerscript_path)
linkerscript = os.path.join(python_lib, 'config',
linkerscript_name)
# XXX this isn't the right place to do this: adding the Python
# library to the link, if needed, should be in the "build_ext"
# command. (It's also needed for non-MS compilers on Windows, and
# it's taken care of for them by the 'build_ext.get_libraries()'
# method.)
g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
(linkerscript, PREFIX, get_python_version()))
global _config_vars
_config_vars = g
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def _init_os2():
"""Initialize the module as appropriate for OS/2"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_config_vars[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_config_vars[key] = flags
# If we're on OSX 10.5 or later and the user tries to
                # compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
                # The major use case for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
m = re.search('-isysroot\s+(\S+)', _config_vars['CFLAGS'])
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_config_vars[key] = flags
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
| gpl-3.0 | -8,925,784,584,515,524,000 | 36.457143 | 79 | 0.53636 | false |
mindprince/test-infra | gubernator/testgrid.py | 19 | 4097 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import cloudstorage as gcs
import pb_glance
CONFIG_PROTO_SCHEMA = {
1: {
'name': 'test_groups',
1: 'name',
2: 'query',
9: {},
},
2: {
'name': 'dashboards',
1: {
'name': 'dashboard_tab',
1: 'name',
2: 'test_group_name',
6: 'base_options',
7: {},
8: {2: {}},
9: {},
11: {},
12: {},
},
2: 'name',
}
}
_testgrid_config = None
def get_config():
"""
    Load the testgrid config from a proto stored on GCS.
It will be cached locally in memory for the life of this process.
Returns:
dict: {
'test_groups': [{'name': ..., 'query': ...}],
'dashboards': [{
'name': ...,
'dashboard_tab': [{'name': ..., 'test_group_name': ...}]
}]
}
"""
global _testgrid_config # pylint: disable=global-statement
if not _testgrid_config:
try:
data = gcs.open('/k8s-testgrid/config').read()
except gcs.NotFoundError:
# Fallback to local files for development-- the k8s-testgrid bucket
# has restrictive ACLs that dev_appserver.py can't read.
data = open('tg-config').read()
_testgrid_config = pb_glance.parse_protobuf(data, CONFIG_PROTO_SCHEMA)
return _testgrid_config
def path_to_group_name(path):
"""
Args:
path: a job directory like "/kubernetes-jenkins/jobs/e2e-gce"
Returns:
test_group_name: the group name in the config, or None if not found
"""
try:
config = get_config()
except gcs.errors.Error:
logging.exception('unable to load testgrid config')
return None
path = path.strip('/') # the config doesn't have leading/trailing slashes
if '/pull/' in path: # translate PR to all-pr result form
path = re.sub(r'/pull/([^/]+/)?\d+/', '/directory/', path)
for test_group in config.get('test_groups', []):
if path in test_group['query']:
return test_group['name'][0]
def path_to_query(path):
"""
Convert a GCS job directory to the testgrid path for its results.
Args:
path: a job directory like "/kubernetes-jenkins/jobs/e2e-gce"
Returns:
query: the url for the job, like "k8s#gce", or "" if not found.
"""
group = path_to_group_name(path)
if not group:
return ''
# Tabs can appear on multiple dashboards. Favor selecting 'k8s' over others,
# otherwise pick a random tab.
options = {}
for dashboard in get_config().get('dashboards', []):
dashboard_name = dashboard['name'][0]
tabs = dashboard['dashboard_tab']
for (skip_base_options, penalty) in ((True, 0), (False, 1000)):
for tab in tabs:
if 'base_options' in tab and skip_base_options:
continue
if group in tab['test_group_name']:
query = '%s#%s' % (dashboard_name, tab['name'][0])
options[dashboard_name] = (-len(tabs) + penalty, query)
if dashboard_name in options:
break
if 'k8s' in options:
return options['k8s'][1]
elif len(options) > 1:
logging.info('ambiguous testgrid options: %s', options)
elif len(options) == 0:
return ''
return sorted(options.values())[0][1]
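# Illustrative only -- group, dashboard and tab names depend on the live
# config proto:
#   path_to_group_name('/kubernetes-jenkins/logs/some-e2e-job') returns the
#   matching test group name, or None if nothing in the config matches.
#   path_to_query('/kubernetes-jenkins/logs/some-e2e-job') then returns a
#   "dashboard#tab" string such as 'k8s#gce', or '' for an unknown job.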
| apache-2.0 | -695,466,601,918,399,600 | 30.037879 | 80 | 0.568221 | false |
evidens/json2csv | json2csv.py | 2 | 4981 | #!/usr/bin/env python
try:
import unicodecsv as csv
except ImportError:
import csv
import json
import operator
import os
from collections import OrderedDict
import logging
logging.basicConfig(level=logging.DEBUG)
class Json2Csv(object):
"""Process a JSON object to a CSV file"""
collection = None
# Better for single-nested dictionaries
SEP_CHAR = ', '
KEY_VAL_CHAR = ': '
DICT_SEP_CHAR = '\r'
DICT_OPEN = ''
DICT_CLOSE = ''
# Better for deep-nested dictionaries
# SEP_CHAR = ', '
# KEY_VAL_CHAR = ': '
# DICT_SEP_CHAR = '; '
# DICT_OPEN = '{ '
# DICT_CLOSE = '} '
def __init__(self, outline):
self.rows = []
if not isinstance(outline, dict):
raise ValueError('You must pass in an outline for JSON2CSV to follow')
elif 'map' not in outline or len(outline['map']) < 1:
raise ValueError('You must specify at least one value for "map"')
key_map = OrderedDict()
for header, key in outline['map']:
splits = key.split('.')
splits = [int(s) if s.isdigit() else s for s in splits]
key_map[header] = splits
self.key_map = key_map
if 'collection' in outline:
self.collection = outline['collection']
def load(self, json_file):
self.process_each(json.load(json_file))
def process_each(self, data):
"""Process each item of a json-loaded dict
"""
if self.collection and self.collection in data:
data = data[self.collection]
for d in data:
logging.info(d)
self.rows.append(self.process_row(d))
def process_row(self, item):
"""Process a row of json data against the key map
"""
row = {}
for header, keys in self.key_map.items():
try:
row[header] = reduce(operator.getitem, keys, item)
except (KeyError, IndexError, TypeError):
row[header] = None
return row
def make_strings(self):
str_rows = []
for row in self.rows:
str_rows.append({k: self.make_string(val)
for k, val in row.items()})
return str_rows
def make_string(self, item):
if isinstance(item, list) or isinstance(item, set) or isinstance(item, tuple):
return self.SEP_CHAR.join([self.make_string(subitem) for subitem in item])
elif isinstance(item, dict):
return self.DICT_OPEN + self.DICT_SEP_CHAR.join([self.KEY_VAL_CHAR.join([k, self.make_string(val)]) for k, val in item.items()]) + self.DICT_CLOSE
else:
return unicode(item)
def write_csv(self, filename='output.csv', make_strings=False):
"""Write the processed rows to the given filename
"""
if (len(self.rows) <= 0):
raise AttributeError('No rows were loaded')
if make_strings:
out = self.make_strings()
else:
out = self.rows
with open(filename, 'wb+') as f:
writer = csv.DictWriter(f, self.key_map.keys())
writer.writeheader()
writer.writerows(out)
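# Illustrative outline and usage (field names below are hypothetical):
#
#   outline = {
#       'collection': 'nodes',
#       'map': [
#           ['id', 'id'],
#           ['author', 'author.name'],
#           ['first_label', 'labels.0'],
#       ],
#   }
#   loader = Json2Csv(outline)
#   loader.load(open('input.json'))
#   loader.write_csv('output.csv', make_strings=True)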
class MultiLineJson2Csv(Json2Csv):
def load(self, json_file):
self.process_each(json_file)
def process_each(self, data, collection=None):
"""Load each line of an iterable collection (ie. file)"""
for line in data:
d = json.loads(line)
if self.collection in d:
d = d[self.collection]
self.rows.append(self.process_row(d))
def init_parser():
import argparse
parser = argparse.ArgumentParser(description="Converts JSON to CSV")
parser.add_argument('json_file', type=argparse.FileType('r'),
help="Path to JSON data file to load")
parser.add_argument('key_map', type=argparse.FileType('r'),
help="File containing JSON key-mapping file to load")
parser.add_argument('-e', '--each-line', action="store_true", default=False,
help="Process each line of JSON file separately")
parser.add_argument('-o', '--output-csv', type=str, default=None,
help="Path to csv file to output")
parser.add_argument(
'--strings', help="Convert lists, sets, and dictionaries fully to comma-separated strings.", action="store_true", default=True)
return parser
if __name__ == '__main__':
parser = init_parser()
args = parser.parse_args()
key_map = json.load(args.key_map)
loader = None
if args.each_line:
loader = MultiLineJson2Csv(key_map)
else:
loader = Json2Csv(key_map)
loader.load(args.json_file)
outfile = args.output_csv
if outfile is None:
fileName, fileExtension = os.path.splitext(args.json_file.name)
outfile = fileName + '.csv'
loader.write_csv(filename=outfile, make_strings=args.strings)
| mit | -6,921,815,868,965,170,000 | 31.135484 | 158 | 0.586027 | false |
jcpowermac/ansible | lib/ansible/module_utils/network/f5/common.py | 4 | 8051 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
f5_provider_spec = {
'server': dict(
fallback=(env_fallback, ['F5_SERVER'])
),
'server_port': dict(
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'user': dict(
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'ssh_keyfile': dict(
fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']),
type='path'
),
'validate_certs': dict(
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'transport': dict(
default='rest',
choices=['cli', 'rest']
),
'timeout': dict(type='int'),
}
f5_argument_spec = {
'provider': dict(type='dict', options=f5_provider_spec),
}
f5_top_spec = {
'server': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_SERVER'])
),
'user': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
removed_in_version=2.9,
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'validate_certs': dict(
removed_in_version=2.9,
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'server_port': dict(
removed_in_version=2.9,
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'transport': dict(
removed_in_version=2.9,
default='rest',
choices=['cli', 'rest']
)
}
f5_argument_spec.update(f5_top_spec)
def get_provider_argspec():
return f5_provider_spec
def load_params(params):
provider = params.get('provider') or dict()
for key, value in iteritems(provider):
if key in f5_argument_spec:
if params.get(key) is None and value is not None:
params[key] = value
# Fully Qualified name (with the partition)
def fqdn_name(partition, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(partition, value)
return value
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fqdn_name(partition, x), list_names)
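# Illustrative examples (partition and object names are hypothetical):
#   fqdn_name('Common', 'pool1')             -> '/Common/pool1'
#   fqdn_name('Common', '/Tenant-A/pool1')   -> '/Tenant-A/pool1'  (unchanged)
#   fq_list_names('Common', ['a', '/T/b'])   -> ['/Common/a', '/T/b']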
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
raise F5ModuleError(to_text(err, errors='surrogate_then_replace'))
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
def cleanup_tokens(client):
try:
resource = client.api.shared.authz.tokens_s.token.load(
name=client.api.icrs.token
)
resource.delete()
except Exception:
pass
def is_cli(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
result = 'cli' in (transport, provider_transport)
return result
class Noop(object):
"""Represent no-operation required
This class is used in the Difference engine to specify when an attribute
has not changed. Difference attributes may return an instance of this
class as a means to indicate when the attribute has not changed.
The Noop object allows attributes to be set to None when sending updates
to the API. `None` is technically a valid value in some cases (it indicates
that the attribute should be removed from the resource).
"""
pass
class F5BaseClient(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
load_params(self.params)
@property
def api(self):
raise F5ModuleError("Management root must be used from the concrete product classes.")
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
module.
This method can be used to reconnect to a remote device without
having to re-instantiate the ArgumentSpec and AnsibleF5Client classes
it will use the same values that were initially provided to those
classes
:return:
:raises iControlUnexpectedHTTPError
"""
self.api = self.mgmt
class AnsibleF5Parameters(object):
def __init__(self, *args, **kwargs):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
self.client = kwargs.pop('client', None)
params = kwargs.pop('params', None)
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
pass
| gpl-3.0 | 4,251,562,229,103,793,700 | 29.612167 | 94 | 0.602037 | false |
liukaijv/XlsxWriter | xlsxwriter/test/workbook/test_check_sheetname.py | 8 | 1669 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...workbook import Workbook
class TestCheckSheetname(unittest.TestCase):
"""
Test the Workbook _check_sheetname() method.
"""
def setUp(self):
self.workbook = Workbook()
def test_check_sheetname(self):
"""Test the _check_sheetname() method"""
got = self.workbook._check_sheetname('name')
exp = 'name'
self.assertEqual(got, exp)
got = self.workbook._check_sheetname('Sheet1')
exp = 'Sheet1'
self.assertEqual(got, exp)
got = self.workbook._check_sheetname(None)
exp = 'Sheet3'
self.assertEqual(got, exp)
def test_check_sheetname_with_exception1(self):
"""Test the _check_sheetname() method with exception"""
name = 'name_that_is_longer_than_thirty_one_characters'
self.assertRaises(Exception, self.workbook._check_sheetname, name)
def test_check_sheetname_with_exception2(self):
"""Test the _check_sheetname() method with exception"""
name = 'name_with_special_character_?'
self.assertRaises(Exception, self.workbook._check_sheetname, name)
def test_check_sheetname_with_exception3(self):
"""Test the _check_sheetname() method with exception"""
name1 = 'Duplicate_name'
name2 = name1.lower()
self.workbook.add_worksheet(name1)
self.assertRaises(Exception, self.workbook.add_worksheet, name2)
def tearDown(self):
self.workbook.fileclosed = 1
| bsd-2-clause | -9,026,266,447,833,554,000 | 27.775862 | 79 | 0.612942 | false |
victorzhao/miniblink49 | v8_4_5/build/landmines.py | 35 | 8385 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as the first hook (See DEPS). If it detects that
the build should be clobbered, it will delete the contents of the build
directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import errno
import gyp_environment
import logging
import optparse
import os
import re
import shutil
import sys
import subprocess
import time
import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_build_dir(build_tool, is_iphone=False):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out'
'/mnt/data/b/build/slave/linux/build/src/out'
'/b/build/slave/ios_rel_device/build/src/xcodebuild'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(SRC_DIR, 'xcodebuild')
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
if 'CHROMIUM_OUT_DIR' in os.environ:
output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
if not output_dir:
        raise RuntimeError('CHROMIUM_OUT_DIR environment variable is set but blank!')
else:
output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
ret = os.path.join(SRC_DIR, output_dir)
elif build_tool in ['msvs', 'vs', 'ib']:
ret = os.path.join(SRC_DIR, 'build')
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def extract_gn_build_commands(build_ninja_file):
"""Extracts from a build.ninja the commands to run GN.
The commands to run GN are the gn rule and build.ninja build step at the
top of the build.ninja file. We want to keep these when deleting GN builds
since we want to preserve the command-line flags to GN.
On error, returns the empty string."""
result = ""
with open(build_ninja_file, 'r') as f:
# Read until the second blank line. The first thing GN writes to the file
# is the "rule gn" and the second is the section for "build build.ninja",
# separated by blank lines.
num_blank_lines = 0
while num_blank_lines < 2:
line = f.readline()
if len(line) == 0:
return '' # Unexpected EOF.
result += line
if line[0] == '\n':
num_blank_lines = num_blank_lines + 1
return result
def delete_build_dir(build_dir):
# GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
if not os.path.exists(build_ninja_d_file):
shutil.rmtree(build_dir)
return
# GN builds aren't automatically regenerated when you sync. To avoid
# messing with the GN workflow, erase everything but the args file, and
# write a dummy build.ninja file that will automatically rerun GN the next
# time Ninja is run.
build_ninja_file = os.path.join(build_dir, 'build.ninja')
build_commands = extract_gn_build_commands(build_ninja_file)
try:
gn_args_file = os.path.join(build_dir, 'args.gn')
with open(gn_args_file, 'r') as f:
args_contents = f.read()
except IOError:
args_contents = ''
shutil.rmtree(build_dir)
# Put back the args file (if any).
os.mkdir(build_dir)
if args_contents != '':
with open(gn_args_file, 'w') as f:
f.write(args_contents)
# Write the build.ninja file sufficiently to regenerate itself.
with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
if build_commands != '':
f.write(build_commands)
else:
# Couldn't parse the build.ninja file, write a default thing.
f.write('''rule gn
command = gn -q gen //out/%s/
description = Regenerating ninja files
build build.ninja: gn
generator = 1
depfile = build.ninja.d
''' % (os.path.split(build_dir)[1]))
  # Write a .d file for the build which references a nonexistent file. This
# will make Ninja always mark the build as dirty.
with open(build_ninja_d_file, 'w') as f:
f.write('build.ninja: nonexistant_file.gn\n')
def needs_clobber(landmines_path, new_landmines):
if os.path.exists(landmines_path):
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
old_date = time.ctime(os.stat(landmines_path).st_ctime)
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
sys.stdout.write('Clobbering due to:\n')
sys.stdout.writelines(diff)
return True
else:
sys.stdout.write('Clobbering due to missing landmines file.\n')
return True
return False
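# Sketch of the trigger above (landmine text is hypothetical): if .landmines
# currently contains
#   ['Compiler switched to clang\n']
# and the landmine scripts now emit
#   ['Compiler switched to clang\n', 'Clobber for VS toolchain update\n']
# the lists differ, the unified diff is printed and needs_clobber() returns
# True, so clobber_if_necessary() below wipes the build directory.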
def clobber_if_necessary(new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_build_dir(landmine_utils.builder())
landmines_path = os.path.normpath(os.path.join(out_dir, '..', '.landmines'))
try:
os.makedirs(out_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
if needs_clobber(landmines_path, new_landmines):
# Clobber contents of build directory but not directory itself: some
# checkouts have the build directory mounted.
for f in os.listdir(out_dir):
path = os.path.join(out_dir, f)
if os.path.basename(out_dir) == 'build':
# Only delete build directories and files for MSVS builds as the folder
# shares some checked out files and directories.
if (os.path.isdir(path) and
re.search(r'(?:[Rr]elease)|(?:[Dd]ebug)', f)):
delete_build_dir(path)
elif (os.path.isfile(path) and
(path.endswith('.sln') or
path.endswith('.vcxproj') or
path.endswith('.vcxproj.user'))):
os.unlink(path)
else:
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
delete_build_dir(path)
if os.path.basename(out_dir) == 'xcodebuild':
# Xcodebuild puts an additional project file structure into build,
# while the output folder is xcodebuild.
project_dir = os.path.join(SRC_DIR, 'build', 'all.xcodeproj')
if os.path.exists(project_dir) and os.path.isdir(project_dir):
delete_build_dir(project_dir)
# Save current set of landmines for next time.
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
def process_options():
"""Returns a list of landmine emitting scripts."""
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
logging.basicConfig(
level=logging.DEBUG if options.verbose else logging.ERROR)
extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
if extra_script:
return options.landmine_scripts + [extra_script]
else:
return options.landmine_scripts
def main():
landmine_scripts = process_options()
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
gyp_environment.set_environment()
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
clobber_if_necessary(landmines)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 1,074,439,803,792,460,900 | 33.22449 | 80 | 0.672033 | false |
tomashaber/raiden | raiden/network/discovery.py | 1 | 3394 | # -*- coding: utf-8 -*-
import socket
from ethereum import slogging
from raiden.exceptions import UnknownAddress
from raiden.utils import (
host_port_to_endpoint,
isaddress,
pex,
split_endpoint,
)
from raiden.exceptions import InvalidAddress
log = slogging.getLogger(__name__)
class Discovery(object):
""" Mock mapping address: host, port """
def __init__(self):
self.nodeid_to_hostport = dict()
def register(self, node_address, host, port):
if not isaddress(node_address):
raise ValueError('node_address must be a valid address')
try:
socket.inet_pton(socket.AF_INET, host)
        except (OSError, socket.error):
raise ValueError('invalid ip address provided: {}'.format(host))
if not isinstance(port, (int, long)):
raise ValueError('port must be a valid number')
self.nodeid_to_hostport[node_address] = (host, port)
def get(self, node_address):
try:
return self.nodeid_to_hostport[node_address]
except KeyError:
raise InvalidAddress('Unknown address {}'.format(pex(node_address)))
def nodeid_by_host_port(self, host_port):
for nodeid, value_hostport in self.nodeid_to_hostport.items():
if value_hostport == host_port:
return nodeid
return None
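# Minimal usage sketch for the in-memory registry (illustrative values only;
# `node_address` must be a value accepted by isaddress()):
#
#   discovery = Discovery()
#   discovery.register(node_address, '127.0.0.1', 40001)
#   host, port = discovery.get(node_address)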
class ContractDiscovery(Discovery):
""" Raiden node discovery.
Allows registering and looking up by endpoint (host, port) for node_address.
"""
def __init__(self, node_address, discovery_proxy):
super(ContractDiscovery, self).__init__()
self.node_address = node_address
self.discovery_proxy = discovery_proxy
def register(self, node_address, host, port):
if node_address != self.node_address:
raise ValueError('You can only register your own endpoint.')
if not isaddress(node_address):
raise ValueError('node_address must be a valid address')
try:
socket.inet_pton(socket.AF_INET, host)
        except (OSError, socket.error):
raise ValueError('invalid ip address provided: {}'.format(host))
if not isinstance(port, (int, long)):
raise ValueError('port must be a valid number')
try:
current_value = self.get(node_address)
except UnknownAddress:
current_value = None
if current_value == (host, port):
log.info(
'endpoint already registered',
node_address=pex(node_address),
host=host,
port=port
)
else:
endpoint = host_port_to_endpoint(host, port)
self.discovery_proxy.register_endpoint(node_address, endpoint)
log.info(
'registered endpoint in discovery',
node_address=pex(node_address),
host=host,
port=port
)
def get(self, node_address):
endpoint = self.discovery_proxy.endpoint_by_address(node_address)
host_port = split_endpoint(endpoint)
return host_port
def nodeid_by_host_port(self, host_port):
host, port = host_port
endpoint = host_port_to_endpoint(host, port)
return self.discovery_proxy.address_by_endpoint(endpoint)
def version(self):
return self.discovery_proxy.version()
| mit | -5,565,086,129,687,018,000 | 29.576577 | 80 | 0.603712 | false |
access-missouri/am-django-project | am/finance/migrations/0001_initial.py | 1 | 3804 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-18 21:21
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('general', '0003_auto_20170804_2100'),
]
operations = [
migrations.CreateModel(
name='FinanceEntity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text=b'Date and time when the object was created.')),
('updated_at', models.DateTimeField(auto_now=True, help_text=b'Date and time when the object was last updated.')),
('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, help_text=b'Key-value store suitable for storing arbitrary information not covered elsewhere.')),
('mec_id', models.CharField(help_text='Missouri Ethics Commission ID number.', max_length=128)),
('name', models.CharField(help_text='Readable name if Company or Committee.', max_length=300)),
('type', models.CharField(choices=[('corp', 'Company'), ('comm', 'Committee'), ('person', 'Person')], help_text='Company, Committee, or Person?', max_length=8)),
('first_name', models.CharField(blank=True, help_text='First name, if person.', max_length=75, null=True)),
                ('last_name', models.CharField(blank=True, help_text='Last name, if person.', max_length=75, null=True)),
('address_first', models.CharField(blank=True, max_length=150, null=True)),
('address_second', models.CharField(blank=True, max_length=150, null=True)),
('address_city', models.CharField(blank=True, max_length=150, null=True)),
('address_state', models.CharField(blank=True, max_length=3, null=True)),
('address_zip', models.CharField(blank=True, max_length=10, null=True)),
('employer', models.CharField(blank=True, help_text='Employer, if person.', max_length=150, null=True)),
('linked_person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='finance_entities', to='general.Person')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FinanceTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text=b'Date and time when the object was created.')),
('updated_at', models.DateTimeField(auto_now=True, help_text=b'Date and time when the object was last updated.')),
('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, help_text=b'Key-value store suitable for storing arbitrary information not covered elsewhere.')),
('type', models.CharField(choices=[('M', 'Monetary'), ('I', 'In-Kind')], max_length=1)),
('date', models.DateField()),
('amount', models.FloatField()),
('t_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='spending', to='finance.FinanceEntity')),
('t_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='income', to='finance.FinanceEntity')),
],
options={
'abstract': False,
},
),
]
| bsd-2-clause | -8,058,657,331,822,270,000 | 62.4 | 197 | 0.615668 | false |
joelthompson/ansible-modules-core | cloud/amazon/iam_cert.py | 17 | 11715 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
      - Whether to create or delete the certificate. When set to present, an update is attempted if new_path or new_name is also specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
- By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
'''
import json
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
    server_cert = iam.get_server_certificate(name).\
        get_server_certificate_result.server_certificate
    meta = server_cert.server_certificate_metadata
    opath = meta.path
    ocert = server_cert.certificate_body
    ocert_id = meta.server_certificate_id
    upload_date = meta.upload_date
    exp = meta.expiration
    return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
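    '''Decide whether an existing IAM certificate should be updated.
    Returns True when the given name (or new_name) is already present and the
    supplied body, if any, matches the stored one. Fails the module when the
    name is taken by a different body, or when the same body already exists
    under another name and dup_ok is not set.'''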
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
                c_index = orig_cert_names.index(i_name)
            except ValueError:
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
module.fail_json(changed=False, msg='A cert with the name %s already exists and'
' has a different certificate body associated'
' with it. Certificates cannot have the same name' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
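    '''Create, update or delete the named server certificate and exit the
    module with the result. `iam` is expected to be an authenticated boto
    IAM connection.'''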
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, choices=[False, True])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
if state == 'present':
cert = open(module.params.get('cert'), 'r').read().rstrip()
key = open(module.params.get('key'), 'r').read().rstrip()
if cert_chain is not None:
cert_chain = open(module.params.get('cert_chain'), 'r').read()
else:
        key = cert = cert_chain = None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | -1,320,375,280,987,984,100 | 38.444444 | 172 | 0.56927 | false |
baylee-d/osf.io | admin_tests/spam/test_extras.py | 13 | 1190 | import pytest
from nose import tools as nt
from admin.spam.templatetags import spam_extras
@pytest.mark.django_db
class TestReverseTags:
@pytest.fixture(autouse=True)
def override_urlconf(self, settings):
settings.ROOT_URLCONF = 'admin.base.urls'
def test_reverse_spam_detail(self):
res = spam_extras.reverse_spam_detail('123ab', page='2', status='4')
nt.assert_in('/spam/123ab/?', res)
nt.assert_in('page=2', res)
nt.assert_in('status=4', res)
nt.assert_equal(len('/spam/123ab/?page=2&status=4'), len(res))
def test_reverse_spam_list(self):
res = spam_extras.reverse_spam_list(page='2', status='4')
nt.assert_in('/spam/?', res)
nt.assert_in('page=2', res)
nt.assert_in('status=4', res)
nt.assert_equal(len('/spam/?page=2&status=4'), len(res))
def test_reverse_spam_user(self):
res = spam_extras.reverse_spam_user('kzzab', page='2', status='4')
nt.assert_in('/spam/user/kzzab/?', res)
nt.assert_in('page=2', res)
nt.assert_in('status=4', res)
nt.assert_equal(len('/spam/user/kzzab/?page=2&status=4'),
len(res))
| apache-2.0 | 8,431,833,047,182,561,000 | 34 | 76 | 0.604202 | false |
artwr/airflow | tests/contrib/operators/test_mongo_to_s3_operator.py | 4 | 4092 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG
from airflow.contrib.operators.mongo_to_s3 import MongoToS3Operator
from airflow.models import TaskInstance
from airflow.utils import timezone
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
TASK_ID = 'test_mongo_to_s3_operator'
MONGO_CONN_ID = 'default_mongo'
S3_CONN_ID = 'default_s3'
MONGO_COLLECTION = 'example_collection'
MONGO_QUERY = {"$lt": "{{ ts + 'Z' }}"}
S3_BUCKET = 'example_bucket'
S3_KEY = 'example_key'
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
MOCK_MONGO_RETURN = [
{'example_return_key_1': 'example_return_value_1'},
{'example_return_key_2': 'example_return_value_2'}
]
class MongoToS3OperatorTest(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
self.mock_operator = MongoToS3Operator(
task_id=TASK_ID,
mongo_conn_id=MONGO_CONN_ID,
s3_conn_id=S3_CONN_ID,
mongo_collection=MONGO_COLLECTION,
mongo_query=MONGO_QUERY,
s3_bucket=S3_BUCKET,
s3_key=S3_KEY,
dag=self.dag
)
def test_init(self):
self.assertEqual(self.mock_operator.task_id, TASK_ID)
self.assertEqual(self.mock_operator.mongo_conn_id, MONGO_CONN_ID)
self.assertEqual(self.mock_operator.s3_conn_id, S3_CONN_ID)
self.assertEqual(self.mock_operator.mongo_collection, MONGO_COLLECTION)
self.assertEqual(self.mock_operator.mongo_query, MONGO_QUERY)
self.assertEqual(self.mock_operator.s3_bucket, S3_BUCKET)
self.assertEqual(self.mock_operator.s3_key, S3_KEY)
def test_template_field_overrides(self):
self.assertEqual(self.mock_operator.template_fields, ['s3_key', 'mongo_query'])
def test_render_template(self):
ti = TaskInstance(self.mock_operator, DEFAULT_DATE)
ti.render_templates()
expected_rendered_template = {'$lt': u'2017-01-01T00:00:00+00:00Z'}
self.assertDictEqual(
expected_rendered_template,
getattr(self.mock_operator, 'mongo_query')
)
@mock.patch('airflow.contrib.operators.mongo_to_s3.MongoHook')
@mock.patch('airflow.contrib.operators.mongo_to_s3.S3Hook')
def test_execute(self, mock_s3_hook, mock_mongo_hook):
operator = self.mock_operator
mock_mongo_hook.return_value.find.return_value = iter(MOCK_MONGO_RETURN)
mock_s3_hook.return_value.load_string.return_value = True
operator.execute(None)
mock_mongo_hook.return_value.find.assert_called_once_with(
mongo_collection=MONGO_COLLECTION,
query=MONGO_QUERY,
mongo_db=None
)
op_stringify = self.mock_operator._stringify
op_transform = self.mock_operator.transform
s3_doc_str = op_stringify(op_transform(MOCK_MONGO_RETURN))
mock_s3_hook.return_value.load_string.assert_called_once_with(
string_data=s3_doc_str,
key=S3_KEY,
bucket_name=S3_BUCKET,
replace=False
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 933,954,973,534,528,100 | 32.268293 | 87 | 0.662268 | false |
stackforge/watcher | watcher/tests/policy_fixture.py | 1 | 1607 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo_config import cfg
from oslo_policy import _parser
from oslo_policy import opts as policy_opts
from watcher.common import policy as watcher_policy
from watcher.tests import fake_policy
CONF = cfg.CONF
class PolicyFixture(fixtures.Fixture):
def _setUp(self):
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file_name = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file_name, 'w') as policy_file:
policy_file.write(fake_policy.policy_data)
policy_opts.set_defaults(CONF)
CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy')
watcher_policy._ENFORCER = None
self.addCleanup(watcher_policy.init().clear)
def set_rules(self, rules):
policy = watcher_policy._ENFORCER
policy.set_rules({k: _parser.parse_rule(v)
for k, v in rules.items()})
| apache-2.0 | 4,579,654,776,968,350,700 | 35.522727 | 78 | 0.688861 | false |
Seagate/swift | swift/container/backend.py | 11 | 37210 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pluggable Back-ends for Container Server
"""
import os
from uuid import uuid4
import time
import cPickle as pickle
import sqlite3
from swift.common.utils import Timestamp
from swift.common.db import DatabaseBroker, utf8encode
SQLITE_ARG_LIMIT = 999
DATADIR = 'containers'
POLICY_STAT_TABLE_CREATE = '''
CREATE TABLE policy_stat (
storage_policy_index INTEGER PRIMARY KEY,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0
);
'''
POLICY_STAT_TRIGGER_SCRIPT = '''
CREATE TRIGGER object_insert_policy_stat AFTER INSERT ON object
BEGIN
UPDATE policy_stat
SET object_count = object_count + (1 - new.deleted),
bytes_used = bytes_used + new.size
WHERE storage_policy_index = new.storage_policy_index;
INSERT INTO policy_stat (
storage_policy_index, object_count, bytes_used)
SELECT new.storage_policy_index,
(1 - new.deleted),
new.size
WHERE NOT EXISTS(
SELECT changes() as change
FROM policy_stat
WHERE change <> 0
);
UPDATE container_info
SET hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER object_delete_policy_stat AFTER DELETE ON object
BEGIN
UPDATE policy_stat
SET object_count = object_count - (1 - old.deleted),
bytes_used = bytes_used - old.size
WHERE storage_policy_index = old.storage_policy_index;
UPDATE container_info
SET hash = chexor(hash, old.name, old.created_at);
END;
'''
CONTAINER_INFO_TABLE_SCRIPT = '''
CREATE TABLE container_info (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT '',
x_container_sync_point1 INTEGER DEFAULT -1,
x_container_sync_point2 INTEGER DEFAULT -1,
storage_policy_index INTEGER DEFAULT 0,
reconciler_sync_point INTEGER DEFAULT -1
);
'''
CONTAINER_STAT_VIEW_SCRIPT = '''
CREATE VIEW container_stat
AS SELECT ci.account, ci.container, ci.created_at,
ci.put_timestamp, ci.delete_timestamp,
ci.reported_put_timestamp, ci.reported_delete_timestamp,
ci.reported_object_count, ci.reported_bytes_used, ci.hash,
ci.id, ci.status, ci.status_changed_at, ci.metadata,
ci.x_container_sync_point1, ci.x_container_sync_point2,
ci.reconciler_sync_point,
ci.storage_policy_index,
coalesce(ps.object_count, 0) AS object_count,
coalesce(ps.bytes_used, 0) AS bytes_used
FROM container_info ci LEFT JOIN policy_stat ps
ON ci.storage_policy_index = ps.storage_policy_index;
CREATE TRIGGER container_stat_update
INSTEAD OF UPDATE ON container_stat
BEGIN
UPDATE container_info
SET account = NEW.account,
container = NEW.container,
created_at = NEW.created_at,
put_timestamp = NEW.put_timestamp,
delete_timestamp = NEW.delete_timestamp,
reported_put_timestamp = NEW.reported_put_timestamp,
reported_delete_timestamp = NEW.reported_delete_timestamp,
reported_object_count = NEW.reported_object_count,
reported_bytes_used = NEW.reported_bytes_used,
hash = NEW.hash,
id = NEW.id,
status = NEW.status,
status_changed_at = NEW.status_changed_at,
metadata = NEW.metadata,
x_container_sync_point1 = NEW.x_container_sync_point1,
x_container_sync_point2 = NEW.x_container_sync_point2,
storage_policy_index = NEW.storage_policy_index,
reconciler_sync_point = NEW.reconciler_sync_point;
END;
'''
class ContainerBroker(DatabaseBroker):
"""Encapsulates working with a container database."""
db_type = 'container'
db_contains_type = 'object'
db_reclaim_timestamp = 'created_at'
@property
def storage_policy_index(self):
if not hasattr(self, '_storage_policy_index'):
self._storage_policy_index = \
self.get_info()['storage_policy_index']
return self._storage_policy_index
def _initialize(self, conn, put_timestamp, storage_policy_index):
"""
Create a brand new container database (tables, indices, triggers, etc.)
"""
if not self.account:
raise ValueError(
'Attempting to create a new database with no account set')
if not self.container:
raise ValueError(
'Attempting to create a new database with no container set')
if storage_policy_index is None:
storage_policy_index = 0
self.create_object_table(conn)
self.create_policy_stat_table(conn, storage_policy_index)
self.create_container_info_table(conn, put_timestamp,
storage_policy_index)
def create_object_table(self, conn):
"""
Create the object table which is specific to the container DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
:param conn: DB connection object
"""
conn.executescript("""
CREATE TABLE object (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
created_at TEXT,
size INTEGER,
content_type TEXT,
etag TEXT,
deleted INTEGER DEFAULT 0,
storage_policy_index INTEGER DEFAULT 0
);
CREATE INDEX ix_object_deleted_name ON object (deleted, name);
CREATE TRIGGER object_update BEFORE UPDATE ON object
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""" + POLICY_STAT_TRIGGER_SCRIPT)
def create_container_info_table(self, conn, put_timestamp,
storage_policy_index):
"""
Create the container_info table which is specific to the container DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
Also creates the container_stat view.
:param conn: DB connection object
:param put_timestamp: put timestamp
:param storage_policy_index: storage policy index
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
# The container_stat view is for compatibility; old versions of Swift
# expected a container_stat table with columns "object_count" and
# "bytes_used", but when that stuff became per-storage-policy and
# moved to the policy_stat table, we stopped creating those columns in
# container_stat.
#
# To retain compatibility, we create the container_stat view with some
# triggers to make it behave like the old container_stat table. This
# way, if an old version of Swift encounters a database with the new
# schema, it can still work.
#
# Note that this can occur during a rolling Swift upgrade if a DB gets
# rsynced from an old node to a new, so it's necessary for
# availability during upgrades. The fact that it enables downgrades is
# a nice bonus.
conn.executescript(CONTAINER_INFO_TABLE_SCRIPT +
CONTAINER_STAT_VIEW_SCRIPT)
conn.execute("""
INSERT INTO container_info (account, container, created_at, id,
put_timestamp, status_changed_at, storage_policy_index)
VALUES (?, ?, ?, ?, ?, ?, ?);
""", (self.account, self.container, Timestamp(time.time()).internal,
str(uuid4()), put_timestamp, put_timestamp,
storage_policy_index))
def create_policy_stat_table(self, conn, storage_policy_index=0):
"""
Create policy_stat table.
:param conn: DB connection object
:param storage_policy_index: the policy_index the container is
being created with
"""
conn.executescript(POLICY_STAT_TABLE_CREATE)
conn.execute("""
INSERT INTO policy_stat (storage_policy_index)
VALUES (?)
""", (storage_policy_index,))
def get_db_version(self, conn):
if self._db_version == -1:
self._db_version = 0
for row in conn.execute('''
SELECT name FROM sqlite_master
WHERE name = 'ix_object_deleted_name' '''):
self._db_version = 1
return self._db_version
def _newid(self, conn):
conn.execute('''
UPDATE container_stat
SET reported_put_timestamp = 0, reported_delete_timestamp = 0,
reported_object_count = 0, reported_bytes_used = 0''')
def _delete_db(self, conn, timestamp):
"""
Mark the DB as deleted
:param conn: DB connection object
:param timestamp: timestamp to mark as deleted
"""
conn.execute("""
UPDATE container_stat
SET delete_timestamp = ?,
status = 'DELETED',
status_changed_at = ?
WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))
def _commit_puts_load(self, item_list, entry):
"""See :func:`swift.common.db.DatabaseBroker._commit_puts_load`"""
data = pickle.loads(entry.decode('base64'))
(name, timestamp, size, content_type, etag, deleted) = data[:6]
if len(data) > 6:
storage_policy_index = data[6]
else:
storage_policy_index = 0
item_list.append({'name': name,
'created_at': timestamp,
'size': size,
'content_type': content_type,
'etag': etag,
'deleted': deleted,
'storage_policy_index': storage_policy_index})
def empty(self):
"""
Check if container DB is empty.
:returns: True if the database has no active objects, False otherwise
"""
self._commit_puts_stale_ok()
with self.get() as conn:
try:
row = conn.execute(
'SELECT max(object_count) from policy_stat').fetchone()
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
row = conn.execute(
'SELECT object_count from container_stat').fetchone()
return (row[0] == 0)
def delete_object(self, name, timestamp, storage_policy_index=0):
"""
Mark an object deleted.
:param name: object name to be deleted
        :param timestamp: timestamp when the object was marked as deleted
        :param storage_policy_index: the storage policy index of the object
        """
self.put_object(name, timestamp, 0, 'application/deleted', 'noetag',
deleted=1, storage_policy_index=storage_policy_index)
def make_tuple_for_pickle(self, record):
return (record['name'], record['created_at'], record['size'],
record['content_type'], record['etag'], record['deleted'],
record['storage_policy_index'])
def put_object(self, name, timestamp, size, content_type, etag, deleted=0,
storage_policy_index=0):
"""
Creates an object in the DB with its metadata.
:param name: object name to be created
:param timestamp: timestamp of when the object was created
:param size: object size
:param content_type: object content-type
:param etag: object etag
:param deleted: if True, marks the object as deleted and sets the
deleted_at timestamp to timestamp
:param storage_policy_index: the storage policy index for the object
"""
record = {'name': name, 'created_at': timestamp, 'size': size,
'content_type': content_type, 'etag': etag,
'deleted': deleted,
'storage_policy_index': storage_policy_index}
self.put_record(record)
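    # Usage sketch (illustrative values): recording an object PUT in this
    # container's listing:
    #
    #   broker.put_object('photos/cat.jpg', Timestamp(time.time()).internal,
    #                     1024, 'image/jpeg',
    #                     'd41d8cd98f00b204e9800998ecf8427e',
    #                     storage_policy_index=0)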
def _is_deleted_info(self, object_count, put_timestamp, delete_timestamp,
**kwargs):
"""
Apply delete logic to database info.
:returns: True if the DB is considered to be deleted, False otherwise
"""
# The container is considered deleted if the delete_timestamp
# value is greater than the put_timestamp, and there are no
# objects in the container.
return (object_count in (None, '', 0, '0')) and (
Timestamp(delete_timestamp) > Timestamp(put_timestamp))
def _is_deleted(self, conn):
"""
Check container_stat view and evaluate info.
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
info = conn.execute('''
SELECT put_timestamp, delete_timestamp, object_count
FROM container_stat''').fetchone()
return self._is_deleted_info(**info)
def get_info_is_deleted(self):
"""
Get the is_deleted status and info for the container.
:returns: a tuple, in the form (info, is_deleted) info is a dict as
returned by get_info and is_deleted is a boolean.
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return {}, True
info = self.get_info()
return info, self._is_deleted_info(**info)
def get_info(self):
"""
Get global data for the container.
:returns: dict with keys: account, container, created_at,
put_timestamp, delete_timestamp, status_changed_at,
object_count, bytes_used, reported_put_timestamp,
reported_delete_timestamp, reported_object_count,
reported_bytes_used, hash, id, x_container_sync_point1,
x_container_sync_point2, and storage_policy_index.
"""
self._commit_puts_stale_ok()
with self.get() as conn:
data = None
trailing_sync = 'x_container_sync_point1, x_container_sync_point2'
trailing_pol = 'storage_policy_index'
errors = set()
while not data:
try:
data = conn.execute(('''
SELECT account, container, created_at, put_timestamp,
delete_timestamp, status_changed_at,
object_count, bytes_used,
reported_put_timestamp, reported_delete_timestamp,
reported_object_count, reported_bytes_used, hash,
id, %s, %s
FROM container_stat
''') % (trailing_sync, trailing_pol)).fetchone()
except sqlite3.OperationalError as err:
err_msg = str(err)
if err_msg in errors:
# only attempt migration once
raise
errors.add(err_msg)
if 'no such column: storage_policy_index' in err_msg:
trailing_pol = '0 AS storage_policy_index'
elif 'no such column: x_container_sync_point' in err_msg:
trailing_sync = '-1 AS x_container_sync_point1, ' \
'-1 AS x_container_sync_point2'
else:
raise
data = dict(data)
# populate instance cache
self._storage_policy_index = data['storage_policy_index']
self.account = data['account']
self.container = data['container']
return data
def set_x_container_sync_points(self, sync_point1, sync_point2):
with self.get() as conn:
try:
self._set_x_container_sync_points(conn, sync_point1,
sync_point2)
except sqlite3.OperationalError as err:
if 'no such column: x_container_sync_point' not in \
str(err):
raise
self._migrate_add_container_sync_points(conn)
self._set_x_container_sync_points(conn, sync_point1,
sync_point2)
conn.commit()
def _set_x_container_sync_points(self, conn, sync_point1, sync_point2):
if sync_point1 is not None and sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?,
x_container_sync_point2 = ?
''', (sync_point1, sync_point2))
elif sync_point1 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?
''', (sync_point1,))
elif sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point2 = ?
''', (sync_point2,))
def get_policy_stats(self):
with self.get() as conn:
try:
info = conn.execute('''
SELECT storage_policy_index, object_count, bytes_used
FROM policy_stat
''').fetchall()
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
info = conn.execute('''
SELECT 0 as storage_policy_index, object_count, bytes_used
FROM container_stat
''').fetchall()
policy_stats = {}
for row in info:
stats = dict(row)
key = stats.pop('storage_policy_index')
policy_stats[key] = stats
return policy_stats
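    # get_policy_stats() returns a dict keyed by storage policy index, e.g.
    # (illustrative):
    #   {0: {'object_count': 12, 'bytes_used': 1024},
    #    1: {'object_count': 3, 'bytes_used': 2048}}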
def has_multiple_policies(self):
with self.get() as conn:
try:
curs = conn.execute('''
SELECT count(storage_policy_index)
FROM policy_stat
''').fetchone()
except sqlite3.OperationalError as err:
if 'no such table: policy_stat' not in str(err):
raise
# no policy_stat row
return False
if curs and curs[0] > 1:
return True
# only one policy_stat row
return False
def set_storage_policy_index(self, policy_index, timestamp=None):
"""
Update the container_stat policy_index and status_changed_at.
"""
if timestamp is None:
timestamp = Timestamp(time.time()).internal
def _setit(conn):
conn.execute('''
INSERT OR IGNORE INTO policy_stat (storage_policy_index)
VALUES (?)
''', (policy_index,))
conn.execute('''
UPDATE container_stat
SET storage_policy_index = ?,
status_changed_at = MAX(?, status_changed_at)
WHERE storage_policy_index <> ?
''', (policy_index, timestamp, policy_index))
conn.commit()
with self.get() as conn:
try:
_setit(conn)
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
self._migrate_add_storage_policy(conn)
_setit(conn)
self._storage_policy_index = policy_index
def reported(self, put_timestamp, delete_timestamp, object_count,
bytes_used):
"""
Update reported stats, available with container's `get_info`.
:param put_timestamp: put_timestamp to update
:param delete_timestamp: delete_timestamp to update
:param object_count: object_count to update
:param bytes_used: bytes_used to update
"""
with self.get() as conn:
conn.execute('''
UPDATE container_stat
SET reported_put_timestamp = ?, reported_delete_timestamp = ?,
reported_object_count = ?, reported_bytes_used = ?
''', (put_timestamp, delete_timestamp, object_count, bytes_used))
conn.commit()
def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
path=None, storage_policy_index=0):
"""
Get a list of objects sorted by name starting at marker onward, up
to limit entries. Entries will begin with the prefix and will not
have the delimiter after the prefix.
:param limit: maximum number of entries to get
:param marker: marker query
:param end_marker: end marker query
:param prefix: prefix query
:param delimiter: delimiter for query
:param path: if defined, will set the prefix and delimiter based on
                     the path
        :param storage_policy_index: storage policy index for the listing
:returns: list of tuples of (name, created_at, size, content_type,
etag)
"""
delim_force_gte = False
(marker, end_marker, prefix, delimiter, path) = utf8encode(
marker, end_marker, prefix, delimiter, path)
self._commit_puts_stale_ok()
if path is not None:
prefix = path
if path:
prefix = path = path.rstrip('/') + '/'
delimiter = '/'
elif delimiter and not prefix:
prefix = ''
orig_marker = marker
with self.get() as conn:
results = []
while len(results) < limit:
query = '''SELECT name, created_at, size, content_type, etag
FROM object WHERE'''
query_args = []
if end_marker:
query += ' name < ? AND'
query_args.append(end_marker)
if delim_force_gte:
query += ' name >= ? AND'
query_args.append(marker)
# Always set back to False
delim_force_gte = False
elif marker and marker >= prefix:
query += ' name > ? AND'
query_args.append(marker)
elif prefix:
query += ' name >= ? AND'
query_args.append(prefix)
if self.get_db_version(conn) < 1:
query += ' +deleted = 0'
else:
query += ' deleted = 0'
orig_tail_query = '''
ORDER BY name LIMIT ?
'''
orig_tail_args = [limit - len(results)]
# storage policy filter
policy_tail_query = '''
AND storage_policy_index = ?
''' + orig_tail_query
policy_tail_args = [storage_policy_index] + orig_tail_args
tail_query, tail_args = \
policy_tail_query, policy_tail_args
try:
curs = conn.execute(query + tail_query,
tuple(query_args + tail_args))
except sqlite3.OperationalError as err:
if 'no such column: storage_policy_index' not in str(err):
raise
tail_query, tail_args = \
orig_tail_query, orig_tail_args
curs = conn.execute(query + tail_query,
tuple(query_args + tail_args))
curs.row_factory = None
if prefix is None:
# A delimiter without a specified prefix is ignored
return [r for r in curs]
if not delimiter:
if not prefix:
# It is possible to have a delimiter but no prefix
# specified. As above, the prefix will be set to the
# empty string, so avoid performing the extra work to
# check against an empty prefix.
return [r for r in curs]
else:
return [r for r in curs if r[0].startswith(prefix)]
# We have a delimiter and a prefix (possibly empty string) to
# handle
rowcount = 0
for row in curs:
rowcount += 1
marker = name = row[0]
if len(results) >= limit or not name.startswith(prefix):
curs.close()
return results
end = name.find(delimiter, len(prefix))
if path is not None:
if name == path:
continue
if end >= 0 and len(name) > end + len(delimiter):
marker = name[:end] + chr(ord(delimiter) + 1)
curs.close()
break
elif end > 0:
marker = name[:end] + chr(ord(delimiter) + 1)
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + 1]
if dir_name != orig_marker:
results.append([dir_name, '0', 0, None, ''])
curs.close()
break
results.append(row)
if not rowcount:
break
return results
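    # Delimiter example (hypothetical rows): with objects 'a/1', 'a/2' and 'b',
    # list_objects_iter(100, '', None, '', '/') returns the subdir placeholder
    # ['a/', '0', 0, None, ''] followed by the full row for 'b'.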
def merge_items(self, item_list, source=None):
"""
Merge items into the object table.
:param item_list: list of dictionaries of {'name', 'created_at',
'size', 'content_type', 'etag', 'deleted',
'storage_policy_index'}
:param source: if defined, update incoming_sync with the source
"""
for item in item_list:
if isinstance(item['name'], unicode):
item['name'] = item['name'].encode('utf-8')
def _really_merge_items(conn):
curs = conn.cursor()
if self.get_db_version(conn) >= 1:
query_mod = ' deleted IN (0, 1) AND '
else:
query_mod = ''
curs.execute('BEGIN IMMEDIATE')
# Get created_at times for objects in item_list that already exist.
# We must chunk it up to avoid sqlite's limit of 999 args.
created_at = {}
for offset in xrange(0, len(item_list), SQLITE_ARG_LIMIT):
chunk = [rec['name'] for rec in
item_list[offset:offset + SQLITE_ARG_LIMIT]]
created_at.update(
((rec[0], rec[1]), rec[2]) for rec in curs.execute(
'SELECT name, storage_policy_index, created_at '
'FROM object WHERE ' + query_mod + ' name IN (%s)' %
','.join('?' * len(chunk)), chunk))
# Sort item_list into things that need adding and deleting, based
# on results of created_at query.
to_delete = {}
to_add = {}
for item in item_list:
item.setdefault('storage_policy_index', 0) # legacy
item_ident = (item['name'], item['storage_policy_index'])
if created_at.get(item_ident) < item['created_at']:
if item_ident in created_at: # exists with older timestamp
to_delete[item_ident] = item
if item_ident in to_add: # duplicate entries in item_list
to_add[item_ident] = max(item, to_add[item_ident],
key=lambda i: i['created_at'])
else:
to_add[item_ident] = item
if to_delete:
curs.executemany(
'DELETE FROM object WHERE ' + query_mod +
'name=? AND storage_policy_index=?',
((rec['name'], rec['storage_policy_index'])
for rec in to_delete.itervalues()))
if to_add:
curs.executemany(
'INSERT INTO object (name, created_at, size, content_type,'
'etag, deleted, storage_policy_index)'
'VALUES (?, ?, ?, ?, ?, ?, ?)',
((rec['name'], rec['created_at'], rec['size'],
rec['content_type'], rec['etag'], rec['deleted'],
rec['storage_policy_index'])
for rec in to_add.itervalues()))
if source:
# for replication we rely on the remote end sending merges in
# order with no gaps to increment sync_points
sync_point = item_list[-1]['ROWID']
curs.execute('''
UPDATE incoming_sync SET
sync_point=max(?, sync_point) WHERE remote_id=?
''', (sync_point, source))
if curs.rowcount < 1:
curs.execute('''
INSERT INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, source))
conn.commit()
with self.get() as conn:
try:
return _really_merge_items(conn)
except sqlite3.OperationalError as err:
if 'no such column: storage_policy_index' not in str(err):
raise
self._migrate_add_storage_policy(conn)
return _really_merge_items(conn)
def get_reconciler_sync(self):
with self.get() as conn:
try:
return conn.execute('''
SELECT reconciler_sync_point FROM container_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
if "no such column: reconciler_sync_point" not in str(err):
raise
return -1
def update_reconciler_sync(self, point):
query = '''
UPDATE container_stat
SET reconciler_sync_point = ?
'''
with self.get() as conn:
try:
conn.execute(query, (point,))
except sqlite3.OperationalError as err:
if "no such column: reconciler_sync_point" not in str(err):
raise
self._migrate_add_storage_policy(conn)
conn.execute(query, (point,))
conn.commit()
def get_misplaced_since(self, start, count):
"""
Get a list of objects which are in a storage policy different
from the container's storage policy.
:param start: last reconciler sync point
:param count: maximum number of entries to get
:returns: list of dicts with keys: name, created_at, size,
content_type, etag, storage_policy_index
"""
qry = '''
SELECT ROWID, name, created_at, size, content_type, etag,
deleted, storage_policy_index
FROM object
WHERE ROWID > ?
AND storage_policy_index != (
SELECT storage_policy_index FROM container_stat LIMIT 1)
ORDER BY ROWID ASC LIMIT ?
'''
self._commit_puts_stale_ok()
with self.get() as conn:
try:
cur = conn.execute(qry, (start, count))
except sqlite3.OperationalError as err:
if "no such column: storage_policy_index" not in str(err):
raise
return []
return list(dict(row) for row in cur.fetchall())
def _migrate_add_container_sync_points(self, conn):
"""
Add the x_container_sync_point columns to the 'container_stat' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE container_stat
ADD COLUMN x_container_sync_point1 INTEGER DEFAULT -1;
ALTER TABLE container_stat
ADD COLUMN x_container_sync_point2 INTEGER DEFAULT -1;
COMMIT;
''')
def _migrate_add_storage_policy(self, conn):
"""
Migrate the container schema to support tracking objects from
multiple storage policies. If the container_stat table has any
pending migrations, they are applied now before copying into
container_info.
* create the 'policy_stat' table.
* copy the current 'object_count' and 'bytes_used' columns to a
row in the 'policy_stat' table.
* add the storage_policy_index column to the 'object' table.
* drop the 'object_insert' and 'object_delete' triggers.
* add the 'object_insert_policy_stat' and
'object_delete_policy_stat' triggers.
* create container_info table for non-policy container info
* insert values from container_stat into container_info
* drop container_stat table
* create container_stat view
"""
# I tried just getting the list of column names in the current
# container_stat table with a pragma table_info, but could never get
# it inside the same transaction as the DDL (non-DML) statements:
# https://docs.python.org/2/library/sqlite3.html
# #controlling-transactions
# So we just apply all pending migrations to container_stat and copy a
# static known list of column names into container_info.
try:
self._migrate_add_container_sync_points(conn)
except sqlite3.OperationalError as e:
if 'duplicate column' in str(e):
conn.execute('ROLLBACK;')
else:
raise
try:
conn.executescript("""
ALTER TABLE container_stat
ADD COLUMN metadata TEXT DEFAULT '';
""")
except sqlite3.OperationalError as e:
if 'duplicate column' not in str(e):
raise
column_names = ', '.join((
'account', 'container', 'created_at', 'put_timestamp',
'delete_timestamp', 'reported_put_timestamp',
'reported_object_count', 'reported_bytes_used', 'hash', 'id',
'status', 'status_changed_at', 'metadata',
'x_container_sync_point1', 'x_container_sync_point2'))
conn.executescript(
'BEGIN;' +
POLICY_STAT_TABLE_CREATE +
'''
INSERT INTO policy_stat (
storage_policy_index, object_count, bytes_used)
SELECT 0, object_count, bytes_used
FROM container_stat;
ALTER TABLE object
ADD COLUMN storage_policy_index INTEGER DEFAULT 0;
DROP TRIGGER object_insert;
DROP TRIGGER object_delete;
''' +
POLICY_STAT_TRIGGER_SCRIPT +
CONTAINER_INFO_TABLE_SCRIPT +
'''
INSERT INTO container_info (%s)
SELECT %s FROM container_stat;
DROP TABLE IF EXISTS container_stat;
''' % (column_names, column_names) +
CONTAINER_STAT_VIEW_SCRIPT +
'COMMIT;')
| apache-2.0 | -1,712,850,593,186,095,000 | 40.025358 | 79 | 0.531524 | false |
dongguangming/pexpect | tests/test_winsize.py | 6 | 2568 | #!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
import unittest
from . import PexpectTestCase
import time
class TestCaseWinsize(PexpectTestCase.PexpectTestCase):
def test_winsize (self):
'''
This tests that the child process can set and get the windows size.
This makes use of an external script sigwinch_report.py.
'''
p1 = pexpect.spawn('%s sigwinch_report.py' % self.PYTHONBIN)
p1.expect('READY', timeout=10)
p1.setwinsize (11,22)
index = p1.expect ([pexpect.TIMEOUT, b'SIGWINCH: \(([0-9]*), ([0-9]*)\)'],
timeout=30)
if index == 0:
self.fail("TIMEOUT -- this platform may not support sigwinch properly.\n" + str(p1))
self.assertEqual(p1.match.group(1, 2), (b"11" ,b"22"))
self.assertEqual(p1.getwinsize(), (11, 22))
time.sleep(1)
p1.setwinsize (24,80)
index = p1.expect ([pexpect.TIMEOUT, b'SIGWINCH: \(([0-9]*), ([0-9]*)\)'],
timeout=10)
if index == 0:
self.fail ("TIMEOUT -- this platform may not support sigwinch properly.\n" + str(p1))
self.assertEqual(p1.match.group(1, 2), (b"24" ,b"80"))
self.assertEqual(p1.getwinsize(), (24, 80))
p1.close()
# def test_parent_resize (self):
# pid = os.getpid()
# p1 = pexpect.spawn('%s sigwinch_report.py' % self.PYTHONBIN)
# time.sleep(10)
# p1.setwinsize (11,22)
# os.kill (pid, signal.SIGWINCH)
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(TestCaseWinsize,'test')
| isc | -798,408,546,997,018,500 | 37.328358 | 97 | 0.633956 | false |
scriptnull/coala | bears/tests/c_languages/codeclone_detection/ClangCloneDetectionBearTest.py | 2 | 3681 | import os
import unittest
from queue import Queue
from bears.tests.BearTestHelper import generate_skip_decorator
from bears.c_languages.codeclone_detection.ClangFunctionDifferenceBear import (
ClangFunctionDifferenceBear)
from bears.c_languages.codeclone_detection.ClangCloneDetectionBear import (
ClangCloneDetectionBear)
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
@generate_skip_decorator(ClangCloneDetectionBear)
class ClangCloneDetectionBearTest(unittest.TestCase):
def setUp(self):
self.base_test_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"clone_detection_samples"))
self.section = Section("default")
self.section.append(Setting("files", "", origin=self.base_test_path))
self.section.append(Setting("max_clone_difference", "0.308"))
self.clone_files = [os.listdir(os.path.join(self.base_test_path,
"clones"))]
def test_dependencies(self):
self.assertIn(ClangFunctionDifferenceBear,
ClangCloneDetectionBear.get_dependencies())
def test_configuration(self):
self.section.append(Setting("average_calculation", "true"))
self.section.append(Setting("poly_postprocessing", "false"))
self.section.append(Setting("exp_postprocessing", "true"))
self.clone_files = [
os.path.join(self.base_test_path, "clones", "s4c.c")]
# Ignore the results, it may be possible that it still passes :)
self.check_clone_detection_bear(self.clone_files,
lambda results, msg: True)
def test_non_clones(self):
self.non_clone_files = [
os.path.join(self.base_test_path, "non_clones", elem)
for elem in os.listdir(os.path.join(self.base_test_path,
"non_clones"))]
self.check_clone_detection_bear(self.non_clone_files,
lambda results, msg:
self.assertEqual(results, [], msg))
def test_clones(self):
self.clone_files = [
os.path.join(self.base_test_path, "clones", elem)
for elem in os.listdir(os.path.join(self.base_test_path,
"clones"))]
self.check_clone_detection_bear(self.clone_files,
lambda results, msg:
self.assertNotEqual(results, [], msg))
def check_clone_detection_bear(self, files, result_check_function):
"""
Checks the results of the CloneDetectionBear with the given function.
:param files: The files to check. Each will be checked
on its own.
:param result_check_function: A function yielding an exception if the
results are invalid.
"""
for file in files:
difference_results = ClangFunctionDifferenceBear(
{file: ""},
self.section,
Queue()).run_bear_from_section([], {})
uut = ClangCloneDetectionBear(
{file: ""},
self.section,
Queue())
arg_dict = {"dependency_results":
{ClangFunctionDifferenceBear.__name__:
list(difference_results)}}
result_check_function(
list(uut.run_bear_from_section([], arg_dict)),
"while analyzing "+file)
| agpl-3.0 | 5,606,790,069,334,038,000 | 41.310345 | 79 | 0.564521 | false |
ecdavis/pants | pants/test/core/test_echo_to_all.py | 1 | 2960 | ###############################################################################
#
# Copyright 2012 Pants Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import socket
import unittest
import pants
from pants.test._pants_util import *
class EchoToAll(pants.Stream):
def on_read(self, data):
for channel in self.server.channels.itervalues():
channel.write(data)
class TestEchoToAll(PantsTestCase):
def setUp(self):
self.server = pants.Server(ConnectionClass=EchoToAll).listen(('127.0.0.1', 4040))
PantsTestCase.setUp(self)
def test_echo_to_all_with_one_client(self):
sock = socket.socket()
sock.settimeout(1.0)
sock.connect(('127.0.0.1', 4040))
request = repr(sock)
sock.send(request)
response = sock.recv(1024)
self.assertEqual(response, request)
sock.close()
def test_echo_to_all_with_two_sequential_clients(self):
sock1 = socket.socket()
sock1.settimeout(1.0)
sock1.connect(('127.0.0.1', 4040))
request1 = repr(sock1)
sock1.send(request1)
response1 = sock1.recv(1024)
self.assertEqual(response1, request1)
sock1.close()
sock2 = socket.socket()
sock2.settimeout(1.0)
sock2.connect(('127.0.0.1', 4040))
request2 = repr(sock2)
sock2.send(request2)
response2 = sock2.recv(1024)
self.assertEqual(response2, request2)
sock2.close()
def test_echo_to_all_with_two_concurrent_clients(self):
sock1 = socket.socket()
sock1.settimeout(1.0)
sock2 = socket.socket()
sock2.settimeout(1.0)
sock1.connect(('127.0.0.1', 4040))
sock2.connect(('127.0.0.1', 4040))
request1 = repr(sock1)
sock1.send(request1)
response1_1 = sock1.recv(1024)
response1_2 = sock2.recv(1024)
request2 = repr(sock2)
sock2.send(request2)
response2_1 = sock1.recv(1024)
response2_2 = sock2.recv(1024)
self.assertEqual(response1_1, request1)
self.assertEqual(response1_2, request1)
self.assertEqual(response2_1, request2)
self.assertEqual(response2_2, request2)
sock1.close()
sock2.close()
def tearDown(self):
PantsTestCase.tearDown(self)
self.server.close()
| apache-2.0 | 6,850,289,465,869,795,000 | 32.258427 | 89 | 0.606757 | false |
UCHIC/WaterMonitor | Previous_Designs/Residential_Prototype/Firmware/first_version/filenamer.py | 1 | 1229 | import re
from os.path import exists, join, isdir
class Filenamer(object):
def __init__(self, filename_format='%08d.dat'):
self._format = filename_format
self._directories_dict = {} # starts out empty
self._next_number = -1 # self.next() will change this to zero
def next_filename(self):
self._next_number += 1
if re.search('%[0-9]*d', self._format):
while any([
exists(join(d, self._format % self._next_number))
for d in self.directories
]):
self._next_number += 1
return self._format % self._next_number
else:
return self._format
def add_directory(self, directory):
self._directories_dict[directory] = None
directories = property(lambda self: self._directories_dict.keys())
format = property(lambda self: self._format)
# make this an iterator
def __iter__(self):
return self
def next(self):
return self.next_filename()
if __name__ == '__main__':
f = Filenamer()
f.add_directory('.')
f.add_directory('..')
for i, filename in enumerate(f):
if i == 9: break
print filename
| bsd-3-clause | -4,510,054,833,404,321,000 | 27.581395 | 70 | 0.558177 | false |
mffiedler/svt | applications_scalability/websockets_perf/test_scripts/v_user_builds.py | 3 | 1554 | from websocket import create_connection
from ConfigParser import SafeConfigParser
import ssl
import gevent
import time
import json
class Transaction(object):
def __init__(self, varfile='ose_vars.cfg'):
'''
Gets instantiated once only
'''
parser = SafeConfigParser()
parser.read(varfile)
self.ose_server = parser.get('wss', 'ose_server')
self.ose_project = parser.get('wss', 'ose_project')
self.ose_resver = parser.get('wss', 'ose_resver')
self.ose_token = parser.get('wss', 'ose_token')
def run(self):
'''
Each thread runs this method independently
'''
url = 'wss://{}/oapi/v1/namespaces/{}/builds?watch={}&resourceVersion={}&access_token={}'.format(self.ose_server, self.ose_project, 'true', self.ose_resver, self.ose_token)
start = time.time()
# Ignore self signed certificates
ws = create_connection(url, sslopt={"cert_reqs": ssl.CERT_NONE})
self.ws = ws
def _receive():
while True:
res = ws.recv()
start_at = time.time()
data = json.loads(res)
end_at = time.time()
response_time = int((end_at - start_at))
gevent.spawn(_receive)
def on_quit(self):
self.ws.close()
if __name__ == '__main__':
trans = Transaction()
trans.run()
| apache-2.0 | 5,527,815,583,500,081,000 | 28.320755 | 180 | 0.515444 | false |
sridevikoushik31/nova | nova/tests/compute/test_claims.py | 5 | 4761 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for resource tracker claims."""
import uuid
from nova.compute import claims
from nova import test
class DummyTracker(object):
icalled = False
rcalled = False
def abort_instance_claim(self, *args, **kwargs):
self.icalled = True
def drop_resize_claim(self, *args, **kwargs):
self.rcalled = True
class ClaimTestCase(test.TestCase):
def setUp(self):
super(ClaimTestCase, self).setUp()
self.resources = self._fake_resources()
self.tracker = DummyTracker()
def _claim(self, **kwargs):
instance = self._fake_instance(**kwargs)
return claims.Claim(instance, self.tracker)
def _fake_instance(self, **kwargs):
instance = {
'uuid': str(uuid.uuid1()),
'memory_mb': 1024,
'root_gb': 10,
'ephemeral_gb': 5,
'vcpus': 1
}
instance.update(**kwargs)
return instance
def _fake_instance_type(self, **kwargs):
instance_type = {
'id': 1,
'name': 'fakeitype',
'memory_mb': 1,
'vcpus': 1,
'root_gb': 1,
'ephemeral_gb': 2
}
instance_type.update(**kwargs)
return instance_type
def _fake_resources(self, values=None):
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
if values:
resources.update(values)
return resources
def test_cpu_unlimited(self):
claim = self._claim(vcpus=100000)
self.assertTrue(claim.test(self.resources))
def test_memory_unlimited(self):
claim = self._claim(memory_mb=99999999)
self.assertTrue(claim.test(self.resources))
def test_disk_unlimited_root(self):
claim = self._claim(root_gb=999999)
self.assertTrue(claim.test(self.resources))
def test_disk_unlimited_ephemeral(self):
claim = self._claim(ephemeral_gb=999999)
self.assertTrue(claim.test(self.resources))
def test_cpu_oversubscription(self):
claim = self._claim(vcpus=8)
limits = {'vcpu': 16}
self.assertTrue(claim.test(self.resources, limits))
def test_cpu_insufficient(self):
claim = self._claim(vcpus=17)
limits = {'vcpu': 16}
self.assertFalse(claim.test(self.resources, limits))
def test_memory_oversubscription(self):
claim = self._claim(memory_mb=4096)
limits = {'memory_mb': 8192}
self.assertTrue(claim.test(self.resources, limits))
def test_memory_insufficient(self):
claim = self._claim(memory_mb=16384)
limits = {'memory_mb': 8192}
self.assertFalse(claim.test(self.resources, limits))
def test_disk_oversubscription(self):
claim = self._claim(root_gb=10, ephemeral_gb=40)
limits = {'disk_gb': 60}
self.assertTrue(claim.test(self.resources, limits))
def test_disk_insufficient(self):
claim = self._claim(root_gb=10, ephemeral_gb=40)
limits = {'disk_gb': 45}
self.assertFalse(claim.test(self.resources, limits))
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.icalled)
def _abort(self):
claim = None
try:
with self._claim(memory_mb=4096) as claim:
raise test.TestingException("abort")
except test.TestingException:
pass
return claim
class ResizeClaimTestCase(ClaimTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.instance = self._fake_instance()
def _claim(self, **kwargs):
instance_type = self._fake_instance_type(**kwargs)
return claims.ResizeClaim(self.instance, instance_type, self.tracker)
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.rcalled)
| apache-2.0 | -4,474,864,012,096,952,300 | 28.943396 | 78 | 0.605755 | false |
goliveirab/odoo | openerp/fields.py | 60 | 74988 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" High-level objects for fields. """
from collections import OrderedDict
from datetime import date, datetime
from functools import partial
from operator import attrgetter
from types import NoneType
import logging
import pytz
import xmlrpclib
from openerp.tools import float_round, frozendict, html_sanitize, ustr, OrderedSet
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
EMPTY_DICT = frozendict()
_logger = logging.getLogger(__name__)
class SpecialValue(object):
""" Encapsulates a value in the cache in place of a normal value. """
def __init__(self, value):
self.value = value
def get(self):
return self.value
class FailedValue(SpecialValue):
""" Special value that encapsulates an exception instead of a value. """
def __init__(self, exception):
self.exception = exception
def get(self):
raise self.exception
def _check_value(value):
""" Return ``value``, or call its getter if ``value`` is a :class:`SpecialValue`. """
return value.get() if isinstance(value, SpecialValue) else value
def resolve_all_mro(cls, name, reverse=False):
""" Return the (successively overridden) values of attribute ``name`` in ``cls``
in mro order, or inverse mro order if ``reverse`` is true.
"""
klasses = reversed(cls.__mro__) if reverse else cls.__mro__
for klass in klasses:
if name in klass.__dict__:
yield klass.__dict__[name]
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __new__(meta, name, bases, attrs):
""" Combine the ``_slots`` dict from parent classes, and determine
            ``__slots__`` for the new class.
"""
base_slots = {}
for base in reversed(bases):
base_slots.update(getattr(base, '_slots', ()))
slots = dict(base_slots)
slots.update(attrs.get('_slots', ()))
attrs['__slots__'] = set(slots) - set(base_slots)
attrs['_slots'] = slots
return type.__new__(meta, name, bases, attrs)
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if cls.type and cls.type not in MetaField.by_type:
MetaField.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.column_attrs = []
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_column_'):
cls.column_attrs.append((attr[8:], attr))
elif attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
class Field(object):
""" The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
        attributes may be provided when instantiating a field:
:param string: the label of the field seen by users (string); if not
set, the ORM takes the field name in the class (capitalized).
:param help: the tooltip of the field seen by users (string)
:param readonly: whether the field is readonly (boolean, by default ``False``)
:param required: whether the value of the field is required (boolean, by
default ``False``)
:param index: whether the field is indexed in database (boolean, by
default ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value
:param states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: 'readonly', 'required', 'invisible'.
Note: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
:param groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
        :param string oldname: the previous name of this field, so that the ORM
            can rename it automatically during migration
.. _field-computed:
.. rubric:: Computed fields
One can define a field whose value is computed instead of simply being
read from the database. The attributes that are specific to computed
fields are given below. To define such a field, simply provide a value
for the attribute ``compute``.
:param compute: name of a method that computes the field
:param inverse: name of a method that inverses the field (optional)
:param search: name of a method that implement search on the field (optional)
:param store: whether the field is stored in database (boolean, by
default ``False`` on computed fields)
:param compute_sudo: whether the field should be recomputed as superuser
to bypass access rights (boolean, by default ``False``)
The methods given for ``compute``, ``inverse`` and ``search`` are model
methods. Their signature is shown in the following example::
upper = fields.Char(compute='_compute_upper',
inverse='_inverse_upper',
search='_search_upper')
@api.depends('name')
def _compute_upper(self):
for rec in self:
rec.upper = rec.name.upper() if rec.name else False
def _inverse_upper(self):
for rec in self:
rec.name = rec.upper.lower() if rec.upper else False
def _search_upper(self, operator, value):
if operator == 'like':
operator = 'ilike'
return [('name', operator, value)]
The compute method has to assign the field on all records of the invoked
recordset. The decorator :meth:`openerp.api.depends` must be applied on
the compute method to specify the field dependencies; those dependencies
are used to determine when to recompute the field; recomputation is
automatic and guarantees cache/database consistency. Note that the same
method can be used for several fields, you simply have to assign all the
given fields in the method; the method will be invoked once for all
those fields.
By default, a computed field is not stored to the database, and is
computed on-the-fly. Adding the attribute ``store=True`` will store the
field's values in the database. The advantage of a stored field is that
searching on that field is done by the database itself. The disadvantage
is that it requires database updates when the field must be recomputed.
The inverse method, as its name says, does the inverse of the compute
method: the invoked records have a value for the field, and you must
apply the necessary changes on the field dependencies such that the
computation gives the expected value. Note that a computed field without
an inverse method is readonly by default.
The search method is invoked when processing domains before doing an
actual search on the model. It must return a domain equivalent to the
condition: ``field operator value``.
.. _field-related:
.. rubric:: Related fields
The value of a related field is given by following a sequence of
relational fields and reading a field on the reached model. The complete
sequence of fields to traverse is specified by the attribute
:param related: sequence of field names
Some field attributes are automatically copied from the source field if
they are not redefined: ``string``, ``help``, ``readonly``, ``required`` (only
if all fields in the sequence are required), ``groups``, ``digits``, ``size``,
``translate``, ``sanitize``, ``selection``, ``comodel_name``, ``domain``,
``context``. All semantic-free attributes are copied from the source
field.
By default, the values of related fields are not stored to the database.
Add the attribute ``store=True`` to make it stored, just like computed
fields. Related fields are automatically recomputed when their
dependencies are modified.
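        As an illustrative sketch only (it assumes a ``partner_id`` many2one
        field on the same model), a related field could be declared as::
            partner_name = fields.Char(string='Partner Name',
                                       related='partner_id.name',
                                       store=True)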
.. _field-company-dependent:
.. rubric:: Company-dependent fields
Formerly known as 'property' fields, the value of those fields depends
on the company. In other words, users that belong to different companies
may see different values for the field on a given record.
:param company_dependent: whether the field is company-dependent (boolean)
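        A minimal, illustrative sketch (the field name is made up)::
            standard_price = fields.Float(string='Cost Price',
                                          company_dependent=True)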
.. _field-incremental-definition:
.. rubric:: Incremental definition
A field is defined as class attribute on a model class. If the model
is extended (see :class:`~openerp.models.Model`), one can also extend
the field definition by redefining a field with the same name and same
type on the subclass. In that case, the attributes of the field are
taken from the parent class and overridden by the ones given in
subclasses.
For instance, the second class below only adds a tooltip on the field
``state``::
class First(models.Model):
_name = 'foo'
state = fields.Selection([...], required=True)
class Second(models.Model):
_inherit = 'foo'
state = fields.Selection(help="Blah blah blah")
"""
__metaclass__ = MetaField
type = None # type of the field (string)
relational = False # whether the field is a relational one
_slots = {
'_attrs': EMPTY_DICT, # dictionary of field attributes; it contains:
# - all attributes after __init__()
# - free attributes only after set_class_name()
'automatic': False, # whether the field is automatically created ("magic" field)
'inherited': False, # whether the field is inherited (_inherits)
'column': None, # the column corresponding to the field
'setup_done': False, # whether the field has been set up
'name': None, # name of the field
'model_name': None, # name of the model of this field
'comodel_name': None, # name of the model of values (if relational)
'store': True, # whether the field is stored in database
'index': False, # whether the field is indexed in database
'manual': False, # whether the field is a custom field
'copy': True, # whether the field is copied over by BaseModel.copy()
'depends': (), # collection of field dependencies
'recursive': False, # whether self depends on itself
'compute': None, # compute(recs) computes field on recs
'compute_sudo': False, # whether field should be recomputed as admin
'inverse': None, # inverse(recs) inverses field on recs
'search': None, # search(recs, operator, value) searches on self
'related': None, # sequence of field names, for related fields
'related_sudo': True, # whether related fields should be read as admin
'company_dependent': False, # whether ``self`` is company-dependent (property field)
'default': None, # default(recs) returns the default value
'string': None, # field label
'help': None, # field tooltip
'readonly': False, # whether the field is readonly
'required': False, # whether the field is required
'states': None, # set readonly and required depending on state
'groups': None, # csv list of group xml ids
'change_default': False, # whether the field may trigger a "user-onchange"
'deprecated': None, # whether the field is deprecated
'inverse_fields': (), # collection of inverse fields (objects)
'computed_fields': (), # fields computed with the same method as self
'related_field': None, # corresponding related field
'_triggers': (), # invalidation and recomputation triggers
}
def __init__(self, string=None, **kwargs):
kwargs['string'] = string
attrs = {key: val for key, val in kwargs.iteritems() if val is not None}
self._attrs = attrs or EMPTY_DICT
def __getattr__(self, name):
""" Access non-slot field attribute. """
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
""" Set slot or non-slot field attribute. """
try:
object.__setattr__(self, name, value)
except AttributeError:
if self._attrs:
self._attrs[name] = value
else:
self._attrs = {name: value} # replace EMPTY_DICT
def __delattr__(self, name):
""" Remove non-slot field attribute. """
try:
del self._attrs[name]
except KeyError:
raise AttributeError(name)
def new(self, **kwargs):
""" Return a field of the same type as ``self``, with its own parameters. """
return type(self)(**kwargs)
def set_class_name(self, cls, name):
""" Assign the model class and field name of ``self``. """
self_attrs = self._attrs
for attr, value in self._slots.iteritems():
setattr(self, attr, value)
self.model_name = cls._name
self.name = name
# determine all inherited field attributes
attrs = {}
for field in resolve_all_mro(cls, name, reverse=True):
if isinstance(field, type(self)):
attrs.update(field._attrs)
else:
attrs.clear()
attrs.update(self_attrs) # necessary in case self is not in cls
# initialize ``self`` with ``attrs``
if attrs.get('compute'):
# by default, computed fields are not stored, not copied and readonly
attrs['store'] = attrs.get('store', False)
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
if attrs.get('related'):
# by default, related fields are not stored and not copied
attrs['store'] = attrs.get('store', False)
attrs['copy'] = attrs.get('copy', False)
# fix for function fields overridden by regular columns
if not isinstance(attrs.get('column'), (NoneType, fields.function)):
attrs.pop('store', None)
for attr, value in attrs.iteritems():
setattr(self, attr, value)
if not self.string and not self.related:
# related fields get their string from their parent field
self.string = name.replace('_', ' ').capitalize()
# determine self.default and cls._defaults in a consistent way
self._determine_default(cls, name)
def _determine_default(self, cls, name):
""" Retrieve the default value for ``self`` in the hierarchy of ``cls``, and
determine ``self.default`` and ``cls._defaults`` accordingly.
"""
self.default = None
# traverse the class hierarchy upwards, and take the first field
# definition with a default or _defaults for self
for klass in cls.__mro__:
if name in klass.__dict__:
field = klass.__dict__[name]
if not isinstance(field, type(self)):
# klass contains another value overridden by self
return
if 'default' in field._attrs:
# take the default in field, and adapt it for cls._defaults
value = field._attrs['default']
if callable(value):
from openerp import api
self.default = value
cls._defaults[name] = api.model(
lambda recs: self.convert_to_write(value(recs))
)
else:
self.default = lambda recs: value
cls._defaults[name] = value
return
defaults = klass.__dict__.get('_defaults') or {}
if name in defaults:
# take the value from _defaults, and adapt it for self.default
value = defaults[name]
if callable(value):
func = lambda recs: value(recs._model, recs._cr, recs._uid, recs._context)
else:
func = lambda recs: value
self.default = lambda recs: self.convert_to_cache(
func(recs), recs, validate=False,
)
cls._defaults[name] = value
return
def __str__(self):
return "%s.%s" % (self.model_name, self.name)
def __repr__(self):
return "%s.%s" % (self.model_name, self.name)
############################################################################
#
# Field setup
#
def setup(self, env):
""" Make sure that ``self`` is set up, except for recomputation triggers. """
if not self.setup_done:
if self.related:
self._setup_related(env)
else:
self._setup_regular(env)
self.setup_done = True
#
# Setup of non-related fields
#
def _setup_regular(self, env):
""" Setup the attributes of a non-related field. """
recs = env[self.model_name]
def make_depends(deps):
return tuple(deps(recs) if callable(deps) else deps)
# convert compute into a callable and determine depends
if isinstance(self.compute, basestring):
# if the compute method has been overridden, concatenate all their _depends
self.depends = ()
for method in resolve_all_mro(type(recs), self.compute, reverse=True):
self.depends += make_depends(getattr(method, '_depends', ()))
self.compute = getattr(type(recs), self.compute)
else:
self.depends = make_depends(getattr(self.compute, '_depends', ()))
# convert inverse and search into callables
if isinstance(self.inverse, basestring):
self.inverse = getattr(type(recs), self.inverse)
if isinstance(self.search, basestring):
self.search = getattr(type(recs), self.search)
#
# Setup of related fields
#
def _setup_related(self, env):
""" Setup the attributes of a related field. """
# fix the type of self.related if necessary
if isinstance(self.related, basestring):
self.related = tuple(self.related.split('.'))
# determine the chain of fields, and make sure they are all set up
recs = env[self.model_name]
fields = []
for name in self.related:
field = recs._fields[name]
field.setup(env)
recs = recs[name]
fields.append(field)
self.related_field = field
# check type consistency
if self.type != field.type:
raise Warning("Type of related field %s is inconsistent with %s" % (self, field))
# determine dependencies, compute, inverse, and search
self.depends = ('.'.join(self.related),)
self.compute = self._compute_related
if not (self.readonly or field.readonly):
self.inverse = self._inverse_related
if field._description_searchable:
# allow searching on self only if the related field is searchable
self.search = self._search_related
# copy attributes from field to self (string, help, etc.)
for attr, prop in self.related_attrs:
if not getattr(self, attr):
setattr(self, attr, getattr(field, prop))
for attr, value in field._attrs.iteritems():
if attr not in self._attrs:
setattr(self, attr, value)
# special case for states: copy it only for inherited fields
if not self.states and self.inherited:
self.states = field.states
# special case for required: check if all fields are required
if not self.store and not self.required:
self.required = all(field.required for field in fields)
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
# when related_sudo, bypass access rights checks when reading values
others = records.sudo() if self.related_sudo else records
for record, other in zip(records, others):
if not record.id:
# draft record, do not switch to another environment
other = record
# traverse the intermediate fields; follow the first record at each step
for name in self.related[:-1]:
other = other[name][:1]
record[self.name] = other[self.related[-1]]
def _inverse_related(self, records):
""" Inverse the related field ``self`` on ``records``. """
for record in records:
other = record
# traverse the intermediate fields, and keep at most one record
for name in self.related[:-1]:
other = other[name][:1]
if other:
other[self.related[-1]] = record[self.name]
def _search_related(self, records, operator, value):
""" Determine the domain to search on field ``self``. """
return [('.'.join(self.related), operator, value)]
# properties used by _setup_related() to copy values from related field
_related_comodel_name = property(attrgetter('comodel_name'))
_related_string = property(attrgetter('string'))
_related_help = property(attrgetter('help'))
_related_readonly = property(attrgetter('readonly'))
_related_groups = property(attrgetter('groups'))
@property
def base_field(self):
""" Return the base field of an inherited field, or ``self``. """
return self.related_field.base_field if self.inherited else self
#
# Setup of field triggers
#
# The triggers is a collection of pairs (field, path) of computed fields
# that depend on ``self``. When ``self`` is modified, it invalidates the cache
# of each ``field``, and registers the records to recompute based on ``path``.
# See method ``modified`` below for details.
#
def add_trigger(self, trigger):
""" Add a recomputation trigger on ``self``. """
if trigger not in self._triggers:
self._triggers += (trigger,)
def setup_triggers(self, env):
""" Add the necessary triggers to invalidate/recompute ``self``. """
model = env[self.model_name]
for path in self.depends:
self._setup_dependency([], model, path.split('.'))
def _setup_dependency(self, path0, model, path1):
""" Make ``self`` depend on ``model``; `path0 + path1` is a dependency of
``self``, and ``path0`` is the sequence of field names from ``self.model``
to ``model``.
"""
env = model.env
head, tail = path1[0], path1[1:]
if head == '*':
# special case: add triggers on all fields of model (except self)
fields = set(model._fields.itervalues()) - set([self])
else:
fields = [model._fields[head]]
for field in fields:
if field == self:
_logger.debug("Field %s is recursively defined", self)
self.recursive = True
continue
#_logger.debug("Add trigger on %s to recompute %s", field, self)
field.add_trigger((self, '.'.join(path0 or ['id'])))
# add trigger on inverse fields, too
for invf in field.inverse_fields:
#_logger.debug("Add trigger on %s to recompute %s", invf, self)
invf.add_trigger((self, '.'.join(path0 + [head])))
# recursively traverse the dependency
if tail:
comodel = env[field.comodel_name]
self._setup_dependency(path0 + [head], comodel, tail)
@property
def dependents(self):
""" Return the computed fields that depend on ``self``. """
return (field for field, path in self._triggers)
############################################################################
#
# Field description
#
def get_description(self, env):
""" Return a dictionary that describes the field ``self``. """
desc = {'type': self.type}
for attr, prop in self.description_attrs:
value = getattr(self, prop)
if callable(value):
value = value(env)
if value is not None:
desc[attr] = value
return desc
# properties used by get_description()
_description_store = property(attrgetter('store'))
_description_manual = property(attrgetter('manual'))
_description_depends = property(attrgetter('depends'))
_description_related = property(attrgetter('related'))
_description_company_dependent = property(attrgetter('company_dependent'))
_description_readonly = property(attrgetter('readonly'))
_description_required = property(attrgetter('required'))
_description_states = property(attrgetter('states'))
_description_groups = property(attrgetter('groups'))
_description_change_default = property(attrgetter('change_default'))
_description_deprecated = property(attrgetter('deprecated'))
@property
def _description_searchable(self):
return bool(self.store or self.search or (self.column and self.column._fnct_search))
@property
def _description_sortable(self):
return self.store or (self.inherited and self.related_field._description_sortable)
def _description_string(self, env):
if self.string and env.lang:
field = self.base_field
name = "%s,%s" % (field.model_name, field.name)
trans = env['ir.translation']._get_source(name, 'field', env.lang)
return trans or self.string
return self.string
def _description_help(self, env):
if self.help and env.lang:
name = "%s,%s" % (self.model_name, self.name)
trans = env['ir.translation']._get_source(name, 'help', env.lang)
return trans or self.help
return self.help
############################################################################
#
# Conversion to column instance
#
def to_column(self):
""" Return a column object corresponding to ``self``, or ``None``. """
if not self.store and self.compute:
# non-stored computed fields do not have a corresponding column
self.column = None
return None
# determine column parameters
#_logger.debug("Create fields._column for Field %s", self)
args = {}
for attr, prop in self.column_attrs:
args[attr] = getattr(self, prop)
for attr, value in self._attrs.iteritems():
args[attr] = value
if self.company_dependent:
# company-dependent fields are mapped to former property fields
args['type'] = self.type
args['relation'] = self.comodel_name
self.column = fields.property(**args)
elif self.column:
# let the column provide a valid column for the given parameters
self.column = self.column.new(_computed_field=bool(self.compute), **args)
else:
# create a fresh new column of the right type
self.column = getattr(fields, self.type)(**args)
return self.column
# properties used by to_column() to create a column instance
_column_copy = property(attrgetter('copy'))
_column_select = property(attrgetter('index'))
_column_manual = property(attrgetter('manual'))
_column_string = property(attrgetter('string'))
_column_help = property(attrgetter('help'))
_column_readonly = property(attrgetter('readonly'))
_column_required = property(attrgetter('required'))
_column_states = property(attrgetter('states'))
_column_groups = property(attrgetter('groups'))
_column_change_default = property(attrgetter('change_default'))
_column_deprecated = property(attrgetter('deprecated'))
############################################################################
#
# Conversion of values
#
def null(self, env):
""" return the null value for this field in the given environment """
return False
def convert_to_cache(self, value, record, validate=True):
""" convert ``value`` to the cache level in ``env``; ``value`` may come from
an assignment, or have the format of methods :meth:`BaseModel.read`
or :meth:`BaseModel.write`
:param record: the target record for the assignment, or an empty recordset
:param bool validate: when True, field-specific validation of
``value`` will be performed
"""
return value
def convert_to_read(self, value, use_name_get=True):
""" convert ``value`` from the cache to a value as returned by method
:meth:`BaseModel.read`
        :param bool use_name_get: when True, value's display name will
be computed using :meth:`BaseModel.name_get`, if relevant
for the field
"""
return False if value is None else value
def convert_to_write(self, value, target=None, fnames=None):
""" convert ``value`` from the cache to a valid value for method
:meth:`BaseModel.write`.
:param target: optional, the record to be modified with this value
:param fnames: for relational fields only, an optional collection of
field names to convert
"""
return self.convert_to_read(value)
def convert_to_onchange(self, value):
""" convert ``value`` from the cache to a valid value for an onchange
method v7.
"""
return self.convert_to_write(value)
def convert_to_export(self, value, env):
""" convert ``value`` from the cache to a valid value for export. The
parameter ``env`` is given for managing translations.
"""
if not value:
return ''
return value if env.context.get('export_raw_data') else ustr(value)
def convert_to_display_name(self, value, record=None):
""" convert ``value`` from the cache to a suitable display name. """
return ustr(value)
############################################################################
#
# Descriptor methods
#
def __get__(self, record, owner):
""" return the value of field ``self`` on ``record`` """
if record is None:
return self # the field is accessed through the owner class
if not record:
# null record -> return the null value for this field
return self.null(record.env)
# only a single record may be accessed
record.ensure_one()
try:
return record._cache[self]
except KeyError:
pass
# cache miss, retrieve value
if record.id:
# normal record -> read or compute value for this field
self.determine_value(record)
else:
# draft record -> compute the value or let it be null
self.determine_draft_value(record)
# the result should be in cache now
return record._cache[self]
def __set__(self, record, value):
""" set the value of field ``self`` on ``record`` """
env = record.env
# only a single record may be updated
record.ensure_one()
# adapt value to the cache level
value = self.convert_to_cache(value, record)
if env.in_draft or not record.id:
# determine dependent fields
spec = self.modified_draft(record)
# set value in cache, inverse field, and mark record as dirty
record._cache[self] = value
if env.in_onchange:
for invf in self.inverse_fields:
invf._update(value, record)
record._set_dirty(self.name)
# determine more dependent fields, and invalidate them
if self.relational:
spec += self.modified_draft(record)
env.invalidate(spec)
else:
# simply write to the database, and update cache
record.write({self.name: self.convert_to_write(value)})
record._cache[self] = value
############################################################################
#
# Computation of field values
#
def _compute_value(self, records):
""" Invoke the compute method on ``records``. """
# initialize the fields to their corresponding null value in cache
for field in self.computed_fields:
records._cache[field] = field.null(records.env)
records.env.computed[field].update(records._ids)
self.compute(records)
for field in self.computed_fields:
records.env.computed[field].difference_update(records._ids)
def compute_value(self, records):
""" Invoke the compute method on ``records``; the results are in cache. """
with records.env.do_in_draft():
try:
self._compute_value(records)
except (AccessError, MissingError):
# some record is forbidden or missing, retry record by record
for record in records:
try:
self._compute_value(record)
except Exception as exc:
record._cache[self.name] = FailedValue(exc)
def determine_value(self, record):
""" Determine the value of ``self`` for ``record``. """
env = record.env
if self.column and not (self.depends and env.in_draft):
# this is a stored field or an old-style function field
if self.depends:
# this is a stored computed field, check for recomputation
recs = record._recompute_check(self)
if recs:
# recompute the value (only in cache)
self.compute_value(recs)
# HACK: if result is in the wrong cache, copy values
if recs.env != env:
for source, target in zip(recs, recs.with_env(env)):
try:
values = target._convert_to_cache({
f.name: source[f.name] for f in self.computed_fields
}, validate=False)
except MissingError as e:
values = FailedValue(e)
target._cache.update(values)
# the result is saved to database by BaseModel.recompute()
return
# read the field from database
record._prefetch_field(self)
elif self.compute:
# this is either a non-stored computed field, or a stored computed
# field in draft mode
if self.recursive:
self.compute_value(record)
else:
recs = record._in_cache_without(self)
self.compute_value(recs)
else:
# this is a non-stored non-computed field
record._cache[self] = self.null(env)
def determine_draft_value(self, record):
""" Determine the value of ``self`` for the given draft ``record``. """
if self.compute:
self._compute_value(record)
else:
record._cache[self] = SpecialValue(self.null(record.env))
def determine_inverse(self, records):
""" Given the value of ``self`` on ``records``, inverse the computation. """
if self.inverse:
self.inverse(records)
def determine_domain(self, records, operator, value):
""" Return a domain representing a condition on ``self``. """
if self.search:
return self.search(records, operator, value)
else:
return [(self.name, operator, value)]
############################################################################
#
# Notification when fields are modified
#
def modified(self, records):
""" Notify that field ``self`` has been modified on ``records``: prepare the
fields/records to recompute, and return a spec indicating what to
invalidate.
"""
# invalidate the fields that depend on self, and prepare recomputation
spec = [(self, records._ids)]
for field, path in self._triggers:
if path and field.store:
# don't move this line to function top, see log
env = records.env(user=SUPERUSER_ID, context={'active_test': False})
target = env[field.model_name].search([(path, 'in', records.ids)])
if target:
spec.append((field, target._ids))
# recompute field on target in the environment of records,
# and as user admin if required
if field.compute_sudo:
target = target.with_env(records.env(user=SUPERUSER_ID))
else:
target = target.with_env(records.env)
target._recompute_todo(field)
else:
spec.append((field, None))
return spec
def modified_draft(self, records):
""" Same as :meth:`modified`, but in draft mode. """
env = records.env
# invalidate the fields on the records in cache that depend on
# ``records``, except fields currently being computed
spec = []
for field, path in self._triggers:
target = env[field.model_name]
computed = target.browse(env.computed[field])
if path == 'id':
target = records - computed
elif path:
target = (target.browse(env.cache[field]) - computed).filtered(
lambda rec: rec._mapped_cache(path) & records
)
else:
target = target.browse(env.cache[field]) - computed
if target:
spec.append((field, target._ids))
return spec
class Boolean(Field):
type = 'boolean'
def convert_to_cache(self, value, record, validate=True):
return bool(value)
def convert_to_export(self, value, env):
if env.context.get('export_raw_data'):
return value
return ustr(value)
class Integer(Field):
type = 'integer'
_slots = {
'group_operator': None, # operator for aggregating values
}
_related_group_operator = property(attrgetter('group_operator'))
_column_group_operator = property(attrgetter('group_operator'))
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, dict):
# special case, when an integer field is used as inverse for a one2many
return value.get('id', False)
return int(value or 0)
def convert_to_read(self, value, use_name_get=True):
# Integer values greater than 2^31-1 are not supported in pure XMLRPC,
# so we have to pass them as floats :-(
if value and value > xmlrpclib.MAXINT:
return float(value)
return value
def _update(self, records, value):
# special case, when an integer field is used as inverse for a one2many
records._cache[self] = value.id or 0
def convert_to_export(self, value, env):
if value or value == 0:
return value if env.context.get('export_raw_data') else ustr(value)
return ''
class Float(Field):
""" The precision digits are given by the attribute
:param digits: a pair (total, decimal), or a function taking a database
cursor and returning a pair (total, decimal)
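    For illustration only (the field name is arbitrary), a monetary-like
    field could be declared as::
        amount = fields.Float(string='Amount', digits=(16, 2))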
"""
type = 'float'
_slots = {
'_digits': None, # digits argument passed to class initializer
'group_operator': None, # operator for aggregating values
}
def __init__(self, string=None, digits=None, **kwargs):
super(Float, self).__init__(string=string, _digits=digits, **kwargs)
@property
def digits(self):
if callable(self._digits):
with fields._get_cursor() as cr:
return self._digits(cr)
else:
return self._digits
def _setup_digits(self, env):
""" Setup the digits for ``self`` and its corresponding column """
pass
def _setup_regular(self, env):
super(Float, self)._setup_regular(env)
self._setup_digits(env)
_related__digits = property(attrgetter('_digits'))
_related_group_operator = property(attrgetter('group_operator'))
_description_digits = property(attrgetter('digits'))
_column_digits = property(lambda self: not callable(self._digits) and self._digits)
_column_digits_compute = property(lambda self: callable(self._digits) and self._digits)
_column_group_operator = property(attrgetter('group_operator'))
def convert_to_cache(self, value, record, validate=True):
# apply rounding here, otherwise value in cache may be wrong!
value = float(value or 0.0)
digits = self.digits
return float_round(value, precision_digits=digits[1]) if digits else value
def convert_to_export(self, value, env):
if value or value == 0.0:
return value if env.context.get('export_raw_data') else ustr(value)
return ''
class _String(Field):
""" Abstract class for string fields. """
_slots = {
'translate': False, # whether the field is translated
}
_column_translate = property(attrgetter('translate'))
_related_translate = property(attrgetter('translate'))
_description_translate = property(attrgetter('translate'))
class Char(_String):
""" Basic string field, can be length-limited, usually displayed as a
single-line string in clients
:param int size: the maximum size of values stored for that field
:param bool translate: whether the values of this field can be translated
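        For illustration only (the field name is arbitrary)::
            name = fields.Char(string='Name', size=64, translate=True)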
"""
type = 'char'
_slots = {
'size': None, # maximum size of values (deprecated)
}
_column_size = property(attrgetter('size'))
_related_size = property(attrgetter('size'))
_description_size = property(attrgetter('size'))
def _setup_regular(self, env):
super(Char, self)._setup_regular(env)
assert isinstance(self.size, (NoneType, int)), \
"Char field %s with non-integer size %r" % (self, self.size)
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
return ustr(value)[:self.size]
class Text(_String):
""" Very similar to :class:`~.Char` but used for longer contents, does not
have a size and usually displayed as a multiline text box.
:param translate: whether the value of this field can be translated
"""
type = 'text'
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
return ustr(value)
class Html(_String):
type = 'html'
_slots = {
'sanitize': True, # whether value must be sanitized
'strip_style': False, # whether to strip style attributes
}
_column_sanitize = property(attrgetter('sanitize'))
_related_sanitize = property(attrgetter('sanitize'))
_description_sanitize = property(attrgetter('sanitize'))
_column_strip_style = property(attrgetter('strip_style'))
_related_strip_style = property(attrgetter('strip_style'))
_description_strip_style = property(attrgetter('strip_style'))
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
if validate and self.sanitize:
return html_sanitize(value, strip_style=self.strip_style)
return value
class Date(Field):
type = 'date'
@staticmethod
def today(*args):
""" Return the current day in the format expected by the ORM.
This function may be used to compute default values.
"""
return date.today().strftime(DATE_FORMAT)
@staticmethod
def context_today(record, timestamp=None):
""" Return the current date as seen in the client's timezone in a format
fit for date fields. This method may be used to compute default
values.
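            For illustration only (the field name is made up), it may be used
            as a field default::
                date_order = fields.Date(default=fields.Date.context_today)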
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a datetime, regular dates
can't be converted between timezones.)
:rtype: str
"""
today = timestamp or datetime.now()
context_today = None
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST
context_today = today_utc.astimezone(pytz.timezone(tz_name))
except Exception:
_logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(DATE_FORMAT)
@staticmethod
def from_string(value):
""" Convert an ORM ``value`` into a :class:`date` value. """
if not value:
return None
value = value[:DATE_LENGTH]
return datetime.strptime(value, DATE_FORMAT).date()
@staticmethod
def to_string(value):
""" Convert a :class:`date` value into the format expected by the ORM. """
return value.strftime(DATE_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
if not value:
return False
if isinstance(value, basestring):
if validate:
# force parsing for validation
self.from_string(value)
return value[:DATE_LENGTH]
return self.to_string(value)
def convert_to_export(self, value, env):
if not value:
return ''
return self.from_string(value) if env.context.get('export_raw_data') else ustr(value)
class Datetime(Field):
type = 'datetime'
@staticmethod
def now(*args):
""" Return the current day and time in the format expected by the ORM.
This function may be used to compute default values.
"""
return datetime.now().strftime(DATETIME_FORMAT)
@staticmethod
def context_timestamp(record, timestamp):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
           display on the client side. For _defaults, :meth:`fields.datetime.now`
           should be used instead.
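           For illustration only (``record`` and ``date_done`` are made-up
           names), a stored UTC value may be converted for display as::
               local_dt = fields.Datetime.context_timestamp(
                   record, fields.Datetime.from_string(record.date_done))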
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, datetime), 'Datetime instance expected'
tz_name = record._context.get('tz') or record.env.user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
@staticmethod
def from_string(value):
""" Convert an ORM ``value`` into a :class:`datetime` value. """
if not value:
return None
value = value[:DATETIME_LENGTH]
if len(value) == DATE_LENGTH:
value += " 00:00:00"
return datetime.strptime(value, DATETIME_FORMAT)
@staticmethod
def to_string(value):
""" Convert a :class:`datetime` value into the format expected by the ORM. """
return value.strftime(DATETIME_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
if not value:
return False
if isinstance(value, basestring):
if validate:
# force parsing for validation
self.from_string(value)
value = value[:DATETIME_LENGTH]
if len(value) == DATE_LENGTH:
value += " 00:00:00"
return value
return self.to_string(value)
def convert_to_export(self, value, env):
if not value:
return ''
return self.from_string(value) if env.context.get('export_raw_data') else ustr(value)
def convert_to_display_name(self, value, record=None):
assert record, 'Record expected'
return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value)))
class Binary(Field):
type = 'binary'
class Selection(Field):
"""
:param selection: specifies the possible values for this field.
It is given as either a list of pairs (``value``, ``string``), or a
model method, or a method name.
:param selection_add: provides an extension of the selection in the case
of an overridden field. It is a list of pairs (``value``, ``string``).
The attribute ``selection`` is mandatory except in the case of
:ref:`related fields <field-related>` or :ref:`field extensions
<field-incremental-definition>`.
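    As an illustrative sketch only (values and labels are made up)::
        state = fields.Selection([('draft', 'Draft'), ('done', 'Done')],
                                 string='Status', default='draft')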
"""
type = 'selection'
_slots = {
'selection': None, # [(value, string), ...], function or method name
}
def __init__(self, selection=None, string=None, **kwargs):
if callable(selection):
from openerp import api
selection = api.expected(api.model, selection)
super(Selection, self).__init__(selection=selection, string=string, **kwargs)
def _setup_regular(self, env):
super(Selection, self)._setup_regular(env)
assert self.selection is not None, "Field %s without selection" % self
def _setup_related(self, env):
super(Selection, self)._setup_related(env)
# selection must be computed on related field
field = self.related_field
self.selection = lambda model: field._description_selection(model.env)
def set_class_name(self, cls, name):
super(Selection, self).set_class_name(cls, name)
# determine selection (applying 'selection_add' extensions)
for field in resolve_all_mro(cls, name, reverse=True):
if isinstance(field, type(self)):
# We cannot use field.selection or field.selection_add here
# because those attributes are overridden by ``set_class_name``.
if 'selection' in field._attrs:
self.selection = field._attrs['selection']
if 'selection_add' in field._attrs:
# use an OrderedDict to update existing values
selection_add = field._attrs['selection_add']
self.selection = OrderedDict(self.selection + selection_add).items()
else:
self.selection = None
def _description_selection(self, env):
""" return the selection list (pairs (value, label)); labels are
translated according to context language
"""
selection = self.selection
if isinstance(selection, basestring):
return getattr(env[self.model_name], selection)()
if callable(selection):
return selection(env[self.model_name])
# translate selection labels
if env.lang:
name = "%s,%s" % (self.model_name, self.name)
translate = partial(
env['ir.translation']._get_source, name, 'selection', env.lang)
return [(value, translate(label) if label else label) for value, label in selection]
else:
return selection
@property
def _column_selection(self):
if isinstance(self.selection, basestring):
method = self.selection
return lambda self, *a, **kw: getattr(self, method)(*a, **kw)
else:
return self.selection
def get_values(self, env):
""" return a list of the possible values """
selection = self.selection
if isinstance(selection, basestring):
selection = getattr(env[self.model_name], selection)()
elif callable(selection):
selection = selection(env[self.model_name])
return [value for value, _ in selection]
def convert_to_cache(self, value, record, validate=True):
if not validate:
return value or False
if value in self.get_values(record.env):
return value
elif not value:
return False
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, env):
if not isinstance(self.selection, list):
# FIXME: this reproduces an existing buggy behavior!
return value if value else ''
for item in self._description_selection(env):
if item[0] == value:
return item[1]
return False
class Reference(Selection):
type = 'reference'
_slots = {
'size': None, # maximum size of values (deprecated)
}
_related_size = property(attrgetter('size'))
_column_size = property(attrgetter('size'))
def _setup_regular(self, env):
super(Reference, self)._setup_regular(env)
assert isinstance(self.size, (NoneType, int)), \
"Reference field %s with non-integer size %r" % (self, self.size)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, BaseModel):
if ((not validate or value._name in self.get_values(record.env))
and len(value) <= 1):
return value.with_env(record.env) or False
elif isinstance(value, basestring):
res_model, res_id = value.split(',')
return record.env[res_model].browse(int(res_id))
elif not value:
return False
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_read(self, value, use_name_get=True):
return "%s,%s" % (value._name, value.id) if value else False
def convert_to_export(self, value, env):
return value.name_get()[0][1] if value else ''
def convert_to_display_name(self, value, record=None):
return ustr(value and value.display_name)
class _Relational(Field):
""" Abstract class for relational fields. """
relational = True
_slots = {
'domain': [], # domain for searching values
'context': {}, # context for searching values
}
def _setup_regular(self, env):
super(_Relational, self)._setup_regular(env)
if self.comodel_name not in env.registry:
_logger.warning("Field %s with unknown comodel_name %r"
% (self, self.comodel_name))
self.comodel_name = '_unknown'
@property
def _related_domain(self):
if callable(self.domain):
# will be called with another model than self's
return lambda recs: self.domain(recs.env[self.model_name])
else:
# maybe not correct if domain is a string...
return self.domain
_related_context = property(attrgetter('context'))
_description_relation = property(attrgetter('comodel_name'))
_description_context = property(attrgetter('context'))
def _description_domain(self, env):
return self.domain(env[self.model_name]) if callable(self.domain) else self.domain
_column_obj = property(attrgetter('comodel_name'))
_column_domain = property(attrgetter('domain'))
_column_context = property(attrgetter('context'))
def null(self, env):
return env[self.comodel_name]
def modified(self, records):
# Invalidate cache for self.inverse_fields, too. Note that recomputation
# of fields that depend on self.inverse_fields is already covered by the
# triggers (see above).
spec = super(_Relational, self).modified(records)
for invf in self.inverse_fields:
spec.append((invf, None))
return spec
class Many2one(_Relational):
""" The value of such a field is a recordset of size 0 (no
record) or 1 (a single record).
:param comodel_name: name of the target model (string)
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param ondelete: what to do when the referred record is deleted;
possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``
:param auto_join: whether JOINs are generated upon search through that
field (boolean, by default ``False``)
:param delegate: set it to ``True`` to make fields of the target model
accessible from the current model (corresponds to ``_inherits``)
The attribute ``comodel_name`` is mandatory except in the case of related
fields or field extensions.
"""
type = 'many2one'
_slots = {
'ondelete': 'set null', # what to do when value is deleted
'auto_join': False, # whether joins are generated upon search
'delegate': False, # whether self implements delegation
}
def __init__(self, comodel_name=None, string=None, **kwargs):
super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)
def set_class_name(self, cls, name):
super(Many2one, self).set_class_name(cls, name)
# determine self.delegate
if not self.delegate:
self.delegate = name in cls._inherits.values()
_column_ondelete = property(attrgetter('ondelete'))
_column_auto_join = property(attrgetter('auto_join'))
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
records._cache[self] = value
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, (NoneType, int, long)):
return record.env[self.comodel_name].browse(value)
if isinstance(value, BaseModel):
if value._name == self.comodel_name and len(value) <= 1:
return value.with_env(record.env)
raise ValueError("Wrong value for %s: %r" % (self, value))
elif isinstance(value, tuple):
return record.env[self.comodel_name].browse(value[0])
elif isinstance(value, dict):
return record.env[self.comodel_name].new(value)
else:
return self.null(record.env)
def convert_to_read(self, value, use_name_get=True):
if use_name_get and value:
# evaluate name_get() as superuser, because the visibility of a
# many2one field value (id and name) depends on the current record's
# access rights, and not the value's access rights.
try:
value_sudo = value.sudo()
# performance trick: make sure that all records of the same
# model as value in value.env will be prefetched in value_sudo.env
value_sudo.env.prefetch[value._name].update(value.env.prefetch[value._name])
return value_sudo.name_get()[0]
except MissingError:
# Should not happen, unless the foreign key is missing.
return False
else:
return value.id
def convert_to_write(self, value, target=None, fnames=None):
return value.id
def convert_to_onchange(self, value):
return value.id
def convert_to_export(self, value, env):
return value.name_get()[0][1] if value else ''
def convert_to_display_name(self, value, record=None):
return ustr(value.display_name)
class UnionUpdate(SpecialValue):
""" Placeholder for a value update; when this value is taken from the cache,
it returns ``record[field.name] | value`` and stores it in the cache.
"""
def __init__(self, field, record, value):
self.args = (field, record, value)
def get(self):
field, record, value = self.args
# in order to read the current field's value, remove self from cache
del record._cache[field]
# read the current field's value, and update it in cache only
record._cache[field] = new_value = record[field.name] | value
return new_value
class _RelationalMulti(_Relational):
""" Abstract class for relational fields *2many. """
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
for record in records:
if self in record._cache:
record._cache[self] = record[self.name] | value
else:
record._cache[self] = UnionUpdate(self, record, value)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, BaseModel):
if value._name == self.comodel_name:
return value.with_env(record.env)
elif isinstance(value, list):
# value is a list of record ids or commands
comodel = record.env[self.comodel_name]
ids = OrderedSet(record[self.name].ids)
# modify ids with the commands
for command in value:
if isinstance(command, (tuple, list)):
if command[0] == 0:
ids.add(comodel.new(command[2]).id)
elif command[0] == 1:
comodel.browse(command[1]).update(command[2])
ids.add(command[1])
elif command[0] == 2:
# note: the record will be deleted by write()
ids.discard(command[1])
elif command[0] == 3:
ids.discard(command[1])
elif command[0] == 4:
ids.add(command[1])
elif command[0] == 5:
ids.clear()
elif command[0] == 6:
ids = OrderedSet(command[2])
elif isinstance(command, dict):
ids.add(comodel.new(command).id)
else:
ids.add(command)
# return result as a recordset
return comodel.browse(list(ids))
elif not value:
return self.null(record.env)
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_read(self, value, use_name_get=True):
return value.ids
def convert_to_write(self, value, target=None, fnames=None):
# remove/delete former records
if target is None:
set_ids = []
result = [(6, 0, set_ids)]
add_existing = lambda id: set_ids.append(id)
else:
tag = 2 if self.type == 'one2many' else 3
result = [(tag, record.id) for record in target[self.name] - value]
add_existing = lambda id: result.append((4, id))
if fnames is None:
# take all fields in cache, except the inverses of self
fnames = set(value._fields) - set(MAGIC_COLUMNS)
for invf in self.inverse_fields:
fnames.discard(invf.name)
# add new and existing records
for record in value:
if not record.id:
values = {k: v for k, v in record._cache.iteritems() if k in fnames}
values = record._convert_to_write(values)
result.append((0, 0, values))
elif record._is_dirty():
values = {k: record._cache[k] for k in record._get_dirty() if k in fnames}
values = record._convert_to_write(values)
result.append((1, record.id, values))
else:
add_existing(record.id)
return result
def convert_to_export(self, value, env):
return ','.join(name for id, name in value.name_get()) if value else ''
def convert_to_display_name(self, value, record=None):
raise NotImplementedError()
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
for record in records:
value = record
# traverse the intermediate fields, and keep at most one record
for name in self.related[:-1]:
value = value[name][:1]
record[self.name] = value[self.related[-1]]
class One2many(_RelationalMulti):
""" One2many field; the value of such a field is the recordset of all the
records in ``comodel_name`` such that the field ``inverse_name`` is equal to
the current record.
:param comodel_name: name of the target model (string)
:param inverse_name: name of the inverse ``Many2one`` field in
``comodel_name`` (string)
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param auto_join: whether JOINs are generated upon search through that
field (boolean, by default ``False``)
:param limit: optional limit to use upon read (integer)
The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in
the case of related fields or field extensions.
"""
type = 'one2many'
_slots = {
'inverse_name': None, # name of the inverse field
'auto_join': False, # whether joins are generated upon search
'limit': None, # optional limit to use upon read
'copy': False, # o2m are not copied by default
}
def __init__(self, comodel_name=None, inverse_name=None, string=None, **kwargs):
super(One2many, self).__init__(
comodel_name=comodel_name,
inverse_name=inverse_name,
string=string,
**kwargs
)
def _setup_regular(self, env):
super(One2many, self)._setup_regular(env)
if self.inverse_name:
# link self to its inverse field and vice-versa
comodel = env[self.comodel_name]
invf = comodel._fields[self.inverse_name]
# In some rare cases, a ``One2many`` field can link to ``Int`` field
# (res_model/res_id pattern). Only inverse the field if this is
# a ``Many2one`` field.
if isinstance(invf, Many2one):
self.inverse_fields += (invf,)
invf.inverse_fields += (self,)
_description_relation_field = property(attrgetter('inverse_name'))
_column_fields_id = property(attrgetter('inverse_name'))
_column_auto_join = property(attrgetter('auto_join'))
_column_limit = property(attrgetter('limit'))
class Many2many(_RelationalMulti):
""" Many2many field; the value of such a field is the recordset.
:param comodel_name: name of the target model (string)
The attribute ``comodel_name`` is mandatory except in the case of related
fields or field extensions.
:param relation: optional name of the table that stores the relation in
the database (string)
:param column1: optional name of the column referring to "these" records
in the table ``relation`` (string)
:param column2: optional name of the column referring to "those" records
in the table ``relation`` (string)
The attributes ``relation``, ``column1`` and ``column2`` are optional. If not
given, names are automatically generated from model names, provided
``model_name`` and ``comodel_name`` are different!
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param limit: optional limit to use upon read (integer)
"""
type = 'many2many'
_slots = {
'relation': None, # name of table
'column1': None, # column of table referring to model
'column2': None, # column of table referring to comodel
'limit': None, # optional limit to use upon read
}
def __init__(self, comodel_name=None, relation=None, column1=None, column2=None,
string=None, **kwargs):
super(Many2many, self).__init__(
comodel_name=comodel_name,
relation=relation,
column1=column1,
column2=column2,
string=string,
**kwargs
)
def _setup_regular(self, env):
super(Many2many, self)._setup_regular(env)
if not self.relation and self.store:
# retrieve self.relation from the corresponding column
column = self.to_column()
if isinstance(column, fields.many2many):
self.relation, self.column1, self.column2 = \
column._sql_names(env[self.model_name])
if self.relation:
m2m = env.registry._m2m
# if inverse field has already been setup, it is present in m2m
invf = m2m.get((self.relation, self.column2, self.column1))
if invf:
self.inverse_fields += (invf,)
invf.inverse_fields += (self,)
else:
# add self in m2m, so that its inverse field can find it
m2m[(self.relation, self.column1, self.column2)] = self
_column_rel = property(attrgetter('relation'))
_column_id1 = property(attrgetter('column1'))
_column_id2 = property(attrgetter('column2'))
_column_limit = property(attrgetter('limit'))
class Serialized(Field):
""" Minimal support for existing sparse and serialized fields. """
type = 'serialized'
def convert_to_cache(self, value, record, validate=True):
return value or {}
class Id(Field):
""" Special case for field 'id'. """
type = 'integer'
_slots = {
'string': 'ID',
'store': True,
'readonly': True,
}
def to_column(self):
self.column = fields.integer(self.string)
return self.column
def __get__(self, record, owner):
if record is None:
return self # the field is accessed through the class owner
if not record:
return False
return record.ensure_one()._ids[0]
def __set__(self, record, value):
raise TypeError("field 'id' cannot be assigned")
# imported here to avoid dependency cycle issues
from openerp import SUPERUSER_ID, registry
from .exceptions import Warning, AccessError, MissingError
from .models import BaseModel, MAGIC_COLUMNS
from .osv import fields
| agpl-3.0 | -9,165,879,571,625,793,000 | 38.950986 | 114 | 0.585547 | false |
akampjes/p0rk-crackling | p0rk/porkweb/migrations/0008_auto__add_field_attackcharset_name.py | 1 | 6836 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AttackCharset.name'
db.add_column(u'porkweb_attackcharset', 'name',
self.gf('django.db.models.fields.CharField')(default='Charset', max_length=64),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AttackCharset.name'
db.delete_column(u'porkweb_attackcharset', 'name')
models = {
u'porkweb.attackcharset': {
'Meta': {'object_name': 'AttackCharset'},
'attack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charsets'", 'to': u"orm['porkweb.AttackType']"}),
'charset': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Charset'", 'max_length': '64'})
},
u'porkweb.attackparam': {
'Meta': {'object_name': 'AttackParam', '_ormbases': [u'porkweb.Param']},
'attack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.AttackType']"}),
u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
},
u'porkweb.attacktype': {
'Meta': {'object_name': 'AttackType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'porkweb.cracked': {
'Meta': {'object_name': 'Cracked'},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'when': ('django.db.models.fields.DateTimeField', [], {})
},
u'porkweb.hashtype': {
'Meta': {'object_name': 'HashType'},
'hashcat': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hashcatType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'ocllite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'oclplus': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'porkweb.job': {
'Meta': {'object_name': 'Job'},
'attackType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.AttackType']"}),
'eta': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hashType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.HashType']"}),
'hashes': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jobServer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.JobServer']", 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'results': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
},
u'porkweb.jobparam': {
'Meta': {'object_name': 'JobParam', '_ormbases': [u'porkweb.Param']},
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.Job']"}),
u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
},
u'porkweb.jobserver': {
'Meta': {'object_name': 'JobServer'},
'details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipaddr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '8117'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Offline'", 'max_length': '16'})
},
u'porkweb.jobtask': {
'Meta': {'object_name': 'JobTask'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
'taskid': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'taskresults': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'taskstatus': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
},
u'porkweb.log': {
'Meta': {'object_name': 'Log'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'porkweb.param': {
'Meta': {'object_name': 'Param'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['porkweb'] | bsd-3-clause | -5,450,891,694,230,060,000 | 61.154545 | 153 | 0.535547 | false |
FreakofKnuth/Nanoinformatics | LSI/ScratchModelRunners/LsiEnvironmentvsNanoMedicine.py | 2 | 4387 | from sys import argv
import numpy
from gensim import corpora, models, similarities
from gensim.corpora import MmCorpus
from nltk.corpus import stopwords
import os
from pprint import pprint
from numpy import median
# Enables ease of recall in the future
modelFile = '.' + os.sep + 'EnvironmentModel'
os.makedirs(modelFile,exist_ok=True)
# File, Documents Text, & Dictionary arrays
fileArray = os.listdir('..' + os.sep + 'Domains' + os.sep + 'EnvironmentTxt' + os.sep)
fileArray2 = os.listdir('..' + os.sep + 'Domains' + os.sep + 'NanoMedicineTxt' + os.sep)
fileArrayHalf = fileArray
fileArraySecondHalf = fileArray2
if "cached" not in argv:
#get stopwords
stops = set(stopwords.words('english'))
dictionary=[]
documents=[]
for file in fileArrayHalf:
with open('..'+os.sep+'Domains'+os.sep+'EnvironmentTxt'+os.sep+file,mode='rt',errors='ignore') as openFile:
words = openFile.read()
documents.append(words.lower().split())
for word in words.lower().split():
dictionary.append(word)
openFile.close()
#map list of words to id#s & save for reference
dictionary = corpora.Dictionary([dictionary])
dictionary.save(modelFile+os.sep+modelFile+'.dict')
# print # of documents
print("\n\n# of documents: ", len(documents), "\nReading in test documents...")
# Vectorize the body of documents agains the dictionary
corpus = [dictionary.doc2bow(document) for document in documents]
print("Serializing the corpus of documents...")
corpora.MmCorpus.serialize('..' + os.sep + 'DomainModels' + os.sep + 'EnvironmentModel' + os.sep + 'EnvironmentModel.serial',corpus)
tfIdfModel = models.TfidfModel(corpus)
tfIdfModel.save('..' + os.sep + 'DomainModels' + os.sep + 'EnvironmentModel' + os.sep + 'EnvironmentModel.mm')
else:
print("Loading saved corpora...")
dictionary=corpora.Dictionary.load(modelFile+os.sep+modelFile+'.dict')
tfIdfModel=models.TfidfModel.load('.' + os.sep + 'DomainModels' + os.sep + 'EnvironmentModel' + os.sep + 'EnvironmentModel.mm')
corpus=MmCorpus('.' + os.sep + 'DomainModels' + os.sep + 'EnvironmentModel' + os.sep + 'EnvironmentModel.serial')
#corpus to tdidf matrix
corpus=tfIdfModel[corpus]
print("Ensure correct corpus length: ",len(corpus),"\n")
print("Document #1 from the corpus (tfIdf): \n",corpus[1][1:20],"\n")
if "cached" not in argv:
#Train LSI
print("Training LSI...")
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=700)
lsi.save('..'+os.sep+'DomainModels'+os.sep+'EnvironmentModel'+os.sep+'EnvironmentModel.lsi')
else:
lsi=models.LsiModel.load('.'+os.sep+'DomainModels'+os.sep+'EnvironmentModel'+os.sep+'EnvironmentModel.lsi')
#transform corpus to LSI space, index it, & save
index = similarities.MatrixSimilarity(lsi[corpus])
index.save(modelFile+os.sep+'EnvironmentModel.index')
print("Calculating the cosines of each document...")
index = similarities.MatrixSimilarity.load(modelFile+os.sep+'EnvironmentModel.index')
#Test files
print("Vectorizing the test documents (Bag of Words)...\n")
testVectorArray=[]
count=1
for file in fileArraySecondHalf:
with open('..'+os.sep+'Domains'+os.sep+'NanoMedicineTxt'+os.sep+file,mode='rt',errors='ignore') as current:
current=current.read()
# Vectorize test documents against the dictionary (Bag of Words)
testVector=dictionary.doc2bow(current.lower().split())
testVector=tfIdfModel[testVector]
if count==1:
print("(Tdidf) Frequency vectors:\n", "(wordId,frequency): ", fileArraySecondHalf[0], "\n", testVector[1:20],
end='\n\n')
testVectorArray+=lsi[testVector]
count += 1
print("Preview an LSI vector: ",fileArraySecondHalf[0],"\n",testVectorArray[1:20],end='\n\n')
print("Creating vector space for entire corpus...")
#perform a similarity query against the corpus & print (document_number, document_similarity)
print("Calculating cosine similarity between corpus and test documents...\n")
similarity=[]
similarity=index[testVectorArray]
pprint("Max similarity between Environment and NanoMedicine: ")
pprint(max(similarity))
pprint("Mean similarity between Environment and NanoMedicine: ")
pprint(numpy.mean(similarity))
pprint("Median similarity between Environment and NanoMedicine: ")
pprint(numpy.median(similarity)) | apache-2.0 | -4,175,512,434,440,232,000 | 41.192308 | 136 | 0.703214 | false |
Revolution1/ohei | ohei/utils/hwinfo/pci/tests/test_lspci.py | 2 | 11805 | """Unit tests for the lspci module"""
import unittest
from hwinfo.pci.lspci import *
DATA_DIR = 'hwinfo/pci/tests/data'
class TestSingleDeviceVVParse(unittest.TestCase):
SAMPLE_DEVICE_FILE = "%s/single_network_device_lspci_vv" % DATA_DIR
DEVICE_DATA = ""
DEVICE_REC = {
'pci_device_string': 'Broadcom Corporation NetXtreme II BCM5716 Gigabit Ethernet (rev 20)',
'pci_device_class': '0200',
'pci_device_class_name': 'Ethernet controller',
'pci_device_bus_id': '02:00.0',
'pci_device_sub_string': 'Dell Device 0488',
'pci_device_vpd_product_name': 'Broadcom NetXtreme II Ethernet Controller',
}
def setUp(self):
# Load device data from sample file
fh = open(self.SAMPLE_DEVICE_FILE)
data = fh.read()
fh.close()
self.parser = LspciVVParser(data)
def _assert_rec_key(self, rec, key):
self.assertEquals(rec[key], self.DEVICE_REC[key])
def test_pci_device_string(self):
rec = self.parser.parse_items().pop()
self._assert_rec_key(rec, 'pci_device_string')
def test_pci_device_bus_id(self):
rec = self.parser.parse_items().pop()
self._assert_rec_key(rec, 'pci_device_bus_id')
def test_pci_device_class_name(self):
rec = self.parser.parse_items().pop()
self._assert_rec_key(rec, 'pci_device_class_name')
def test_pci_device_sub_string(self):
rec = self.parser.parse_items().pop()
self._assert_rec_key(rec, 'pci_device_sub_string')
def test_pci_device_vpd_product_name(self):
rec = self.parser.parse_items().pop()
self._assert_rec_key(rec, 'pci_device_vpd_product_name')
class TestMultiDeviceVVParse(unittest.TestCase):
SAMPLE_DEVICE_FILE = "%s/lspci_vv" % DATA_DIR
def setUp(self):
fh = open(self.SAMPLE_DEVICE_FILE)
data = fh.read()
fh.close()
self.parser = LspciVVParser(data)
def test_parse_all_devices(self):
recs = self.parser.parse_items()
self.assertEqual(len(recs), 58)
found = False
for rec in recs:
print rec
if rec['pci_device_bus_id'] == '02:00.0':
self.assertEqual(rec['pci_device_class_name'], 'VGA compatible controller')
found = True
self.assertEqual(found, True)
class TestSingleDeviceNParse(unittest.TestCase):
DATA = "ff:10.5 0880: 8086:0eb5 (rev 04)"
DEVICE_REC = {
'pci_device_bus_id': 'ff:10.5',
'pci_vendor_id': '8086',
'pci_device_id': '0eb5',
'pci_device_class': '0880',
}
def setUp(self):
self.parser = LspciNParser(self.DATA)
self.rec = self.parser.parse_items().pop()
def _assert_rec_key(self, key):
self.assertEquals(self.rec[key], self.DEVICE_REC[key])
def test_pci_device_bus_id(self):
self._assert_rec_key('pci_device_bus_id')
def test_pci_vendor_id(self):
self._assert_rec_key('pci_vendor_id')
def test_pci_device_id(self):
self._assert_rec_key('pci_device_id')
def test_pci_device_class(self):
self._assert_rec_key('pci_device_class')
class TestMultiDeviceNParse(unittest.TestCase):
SAMPLE_DEVICE_FILE = "%s/lspci_n" % DATA_DIR
def setUp(self):
fh = open(self.SAMPLE_DEVICE_FILE)
data = fh.read()
fh.close()
self.parser = LspciNParser(data)
def test_parse_all_devices(self):
recs = self.parser.parse_items()
self.assertEqual(len(recs), 171)
class TestSingleDeviceNNMMParse(unittest.TestCase):
SAMPLE_DATA = '02:00.0 "Ethernet controller [0200]" "Broadcom Corporation [14e4]" "NetXtreme II BCM5716 Gigabit Ethernet [163b]" -r20 "Dell [1028]" "Device [02a3]'
DEVICE_REC = {
'pci_device_bus_id': '02:00.0',
'pci_device_class': '0200',
'pci_device_class_name': 'Ethernet controller',
'pci_vendor_name': 'Broadcom Corporation',
'pci_vendor_id': '14e4',
'pci_device_id': '163b',
'pci_device_name': 'NetXtreme II BCM5716 Gigabit Ethernet',
'pci_subvendor_name': 'Dell',
'pci_subvendor_id': '1028',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': '02a3',
}
def setUp(self):
self.parser = LspciNNMMParser(self.SAMPLE_DATA)
self.rec = self.parser.parse_items()[0]
def _assert_rec_key(self, key):
self.assertEquals(self.rec[key], self.DEVICE_REC[key])
def test_pci_device_bus_id(self):
self._assert_rec_key('pci_device_bus_id')
def test_pci_device_class(self):
self._assert_rec_key('pci_device_class')
def test_pci_device_class_name(self):
self._assert_rec_key('pci_device_class_name')
def test_pci_vendor_name(self):
self._assert_rec_key('pci_vendor_name')
def test_pci_vendor_id(self):
self._assert_rec_key('pci_vendor_id')
def test_pci_device_id(self):
self._assert_rec_key('pci_device_id')
def test_pci_device_name(self):
self._assert_rec_key('pci_device_name')
def test_pci_subvendor_name(self):
self._assert_rec_key('pci_subvendor_name')
def test_pci_subdevice_name(self):
self._assert_rec_key('pci_subdevice_name')
def test_pci_subdevice_id(self):
self._assert_rec_key('pci_subdevice_id')
class LsiDeviceParse(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '03:00.0 "SCSI storage controller [0100]" "LSI Logic / Symbios Logic [1000]" "SAS1068E PCI-Express Fusion-MPT SAS [0058]" -r08 "Dell [1028]" "SAS 6/iR Integrated Blades RAID Controller [1f0f]"'
DEVICE_REC = {
'pci_device_bus_id': '03:00.0',
'pci_device_class': '0100',
'pci_device_class_name': 'SCSI storage controller',
'pci_vendor_name': 'LSI Logic / Symbios Logic',
'pci_vendor_id': '1000',
'pci_device_id': '0058',
'pci_device_name': 'SAS1068E PCI-Express Fusion-MPT SAS',
'pci_subvendor_name': 'Dell',
'pci_subvendor_id': '1028',
'pci_subdevice_name': 'SAS 6/iR Integrated Blades RAID Controller',
'pci_subdevice_id': '1f0f',
}
class IntelUSBControllerDeviceParse(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '00:1d.0 "USB controller [0c03]" "Intel Corporation [8086]" "5 Series/3400 Series Chipset USB2 Enhanced Host Controller [3b34]" -r05 -p20 "Dell [1028]" "Device [02a3]"'
DEVICE_REC = {
'pci_device_bus_id': '00:1d.0',
'pci_device_class': '0c03',
'pci_device_class_name': 'USB controller',
'pci_vendor_name': 'Intel Corporation',
'pci_vendor_id': '8086',
'pci_device_id': '3b34',
'pci_device_name': '5 Series/3400 Series Chipset USB2 Enhanced Host Controller',
'pci_subvendor_name': 'Dell',
'pci_subvendor_id': '1028',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': '02a3',
}
class EmulexNicDeviceParse(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '0c:00.0 "Ethernet controller [0200]" "Emulex Corporation [19a2]" "OneConnect 10Gb NIC (be3) [0710]" -r02 "Emulex Corporation [10df]" "Device [e70b]"'
DEVICE_REC = {
'pci_device_bus_id': '0c:00.0',
'pci_device_class': '0200',
'pci_device_class_name': 'Ethernet controller',
'pci_vendor_name': 'Emulex Corporation',
'pci_vendor_id': '19a2',
'pci_device_id': '0710',
'pci_device_name': 'OneConnect 10Gb NIC (be3)',
'pci_subvendor_name': 'Emulex Corporation',
'pci_subvendor_id': '10df',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': 'e70b',
}
class EmulexHbDeviceParse(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '07:00.0 "Fibre Channel [0c04]" "Emulex Corporation [10df]" "Saturn-X: LightPulse Fibre Channel Host Adapter [f100]" -r03 "Hewlett-Packard Company [103c]" "Device [3282]"'
DEVICE_REC = {
'pci_device_bus_id': '07:00.0',
'pci_device_class': '0c04',
'pci_device_class_name': 'Fibre Channel',
'pci_vendor_name': 'Emulex Corporation',
'pci_vendor_id': '10df',
'pci_device_id': 'f100',
'pci_device_name': 'Saturn-X: LightPulse Fibre Channel Host Adapter',
'pci_subvendor_name': 'Hewlett-Packard Company',
'pci_subvendor_id': '103c',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': '3282',
}
class LsiSASDeviceParse(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '06:00.0 "Serial Attached SCSI controller [0107]" "LSI Logic / Symbios Logic [1000]" "SAS2004 PCI-Express Fusion-MPT SAS-2 [Spitfire] [0070]" -r03 "IBM [1014]" "Device [03f8]"'
DEVICE_REC = {
'pci_device_bus_id': '06:00.0',
'pci_device_class': '0107',
'pci_device_class_name': 'Serial Attached SCSI controller',
'pci_vendor_name': 'LSI Logic / Symbios Logic',
'pci_vendor_id': '1000',
'pci_device_id': '0070',
'pci_device_name': 'SAS2004 PCI-Express Fusion-MPT SAS-2 [Spitfire]',
'pci_subvendor_name': 'IBM',
'pci_subvendor_id': '1014',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': '03f8',
}
class BroadcomNetDeviceParse(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '01:00.0 "Ethernet controller [0200]" "Broadcom Corporation [14e4]" "NetXtreme BCM5720 Gigabit Ethernet PCIe [165f]" "Dell [1028]" "Device [1f5b]"'
DEVICE_REC = {
'pci_device_bus_id': '01:00.0',
'pci_device_class': '0200',
'pci_device_class_name': 'Ethernet controller',
'pci_vendor_name': 'Broadcom Corporation',
'pci_vendor_id': '14e4',
'pci_device_id': '165f',
'pci_device_name': 'NetXtreme BCM5720 Gigabit Ethernet PCIe',
'pci_subvendor_name': 'Dell',
'pci_subvendor_id': '1028',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': '1f5b',
}
class IntelNetDeviceParser(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '00:19.0 "Ethernet controller [0200]" "Intel Corporation [8086]" "82579LM Gigabit Network Connection [1502]" -r06 "Dell [1028]" "Device [05d2]"'
DEVICE_REC = {
'pci_device_bus_id': '00:19.0',
'pci_device_class': '0200',
'pci_device_class_name': 'Ethernet controller',
'pci_vendor_name': 'Intel Corporation',
'pci_vendor_id': '8086',
'pci_device_id': '1502',
'pci_device_name': '82579LM Gigabit Network Connection',
'pci_subvendor_name': 'Dell',
'pci_subvendor_id': '1028',
'pci_subdevice_name': 'Device',
'pci_subdevice_id': '05d2',
}
class BrocadeNetDeviceParser(TestSingleDeviceNNMMParse):
SAMPLE_DATA = '0d:00.3 "Ethernet controller [0200]" "Brocade Communications Systems, Inc. [1657]" "1010/1020/1007/1741 10Gbps CNA [0014]" -r01 "Brocade Communications Systems, Inc. [1657]" "1010/1020/1007/1741 10Gbps CNA - LL [0015]"'
DEVICE_REC = {
'pci_device_bus_id': '0d:00.3',
'pci_device_class': '0200',
'pci_device_class_name': 'Ethernet controller',
'pci_vendor_name': 'Brocade Communications Systems, Inc.',
'pci_vendor_id': '1657',
'pci_device_id': '0014',
'pci_device_name': '1010/1020/1007/1741 10Gbps CNA',
'pci_subvendor_name': 'Brocade Communications Systems, Inc.',
'pci_subvendor_id': '1657',
'pci_subdevice_name': '1010/1020/1007/1741 10Gbps CNA - LL',
'pci_subdevice_id': '0015',
}
class TestMultiDeviceNNMMParse(unittest.TestCase):
SAMPLE_FILE = '%s/lspci-nnmm' % DATA_DIR
def setUp(self):
fh = open(self.SAMPLE_FILE)
data = fh.read()
fh.close()
self.parser = LspciNNMMParser(data)
def test_number_of_devices(self):
recs = self.parser.parse_items()
self.assertEqual(len(recs), 37)
| mit | -4,691,712,977,889,901,000 | 34.664653 | 238 | 0.613045 | false |
superdesk/superdesk-core | superdesk/storage/desk_media_storage.py | 1 | 6398 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import json
import mimetypes
import bson
import bson.errors
import gridfs
import os.path
from eve.io.mongo.media import GridFSMediaStorage
from . import SuperdeskMediaStorage
logger = logging.getLogger(__name__)
def format_id(_id):
try:
return bson.ObjectId(_id)
except bson.errors.InvalidId:
return _id
class SuperdeskGridFSMediaStorage(SuperdeskMediaStorage, GridFSMediaStorage):
def get(self, _id, resource=None):
logger.debug("Getting media file with id= %s" % _id)
_id = format_id(_id)
try:
media_file = self.fs(resource).get(_id)
except Exception:
media_file = None
if media_file and media_file.metadata:
for k, v in media_file.metadata.items():
if isinstance(v, str):
try:
media_file.metadata[k] = json.loads(v)
except ValueError:
logger.info("Non JSON metadata for file: %s with key: %s and value: %s", _id, k, v)
return media_file
def url_for_media(self, media_id, content_type=None):
"""Return url for given media id.
:param media_id: media id from media_id method
"""
ext = mimetypes.guess_extension(content_type or "") or ""
if ext in (".jpe", ".jpeg"):
ext = ".jpg"
return self.app.upload_url(str(media_id) + ext)
def url_for_download(self, media_id, content_type=None):
"""Return url for download.
:param media_id: media id from media_id method
"""
return self.app.download_url(str(media_id))
def url_for_external(self, media_id: str, resource: str = None) -> str:
"""Returns a URL for external use
Returns a URL for use with the Content/Production API
:param str media_id: The ID of the asset
:param str resource: The name of the resource type this Asset is attached to
:rtype: str
:return: The URL for external use
"""
return f"/assets/{media_id}"
def fetch_rendition(self, rendition):
return self.get(rendition.get("media"), "upload")
def put(self, content, filename=None, content_type=None, metadata=None, resource=None, folder=None, **kwargs):
"""Store content in gridfs.
:param content: binary stream
:param filename: unique filename
:param content_type: mime type
:param metadata: file metadata
:param resource: type of resource
:param str folder: Folder that the file will be stored in
:return str: The ID that was generated for this object
"""
# try to determine mimetype on the server
content_type = self._get_mimetype(content, filename, content_type)
if "_id" in kwargs:
kwargs["_id"] = format_id(kwargs["_id"])
if folder:
if folder[-1] == "/":
folder = folder[:-1]
if filename:
filename = "{}/{}".format(folder, filename)
try:
logger.info("Adding file {} to the GridFS".format(filename))
return self.fs(resource).put(
content, content_type=content_type, filename=filename, metadata=metadata, **kwargs
)
except gridfs.errors.FileExists:
logger.info("File exists filename=%s id=%s" % (filename, kwargs["_id"]))
def fs(self, resource=None):
resource = resource or "upload"
driver = self.app.data.mongo
px = driver.current_mongo_prefix(resource)
if px not in self._fs:
self._fs[px] = gridfs.GridFS(driver.pymongo(prefix=px).db)
return self._fs[px]
def remove_unreferenced_files(self, existing_files, resource=None):
"""Get the files from Grid FS and compare against existing files and delete the orphans."""
current_files = self.fs(resource).find({"_id": {"$nin": list(existing_files)}})
for file_id in (file._id for file in current_files if str(file._id) not in existing_files):
print("Removing unused file: ", file_id)
self.delete(file_id)
print("Image cleaning completed successfully.")
def find(self, folder=None, upload_date=None, resource=None):
"""Search for files in the GridFS
Searches for files in the GridFS using a combination of folder name and/or upload date
        comparisons. The upload date comparison uses the same MongoDB BSON comparison operators,
        i.e. `$eq`, `$gt`, `$gte`, `$lt`, `$lte` and `$ne`, which can be combined.
:param str folder: Folder name
:param dict upload_date: Upload date with comparison operator (i.e. $lt, $lte, $gt or $gte)
:param resource: The resource type to use
:return list: List of files that matched the provided parameters
"""
folder_query = {"filename": {"$regex": "^{}/".format(folder)}} if folder else None
date_query = {"uploadDate": upload_date} if upload_date else None
if folder and upload_date:
query = {"$and": [folder_query, date_query]}
elif folder:
query = folder_query
elif date_query:
query = date_query
else:
query = {}
files = []
for file in self.fs(resource).find(query):
try:
files.append(
{
"_id": file._id,
"filename": file.filename,
"upload_date": file.upload_date,
"size": file.length,
"_etag": file.md5,
}
)
except AttributeError as e:
logging.warning("Failed to get file attributes. {}".format(e))
return files
def exists(self, id_or_filename, resource):
_id = format_id(id_or_filename)
return super().exists(_id)
def get_by_filename(self, filename):
_id, _ = os.path.splitext(filename)
return self.get(_id)
| agpl-3.0 | -8,318,082,459,960,763,000 | 34.743017 | 114 | 0.587215 | false |
pkuong/garcon | tests/test_decider.py | 1 | 9081 | from __future__ import absolute_import
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import boto.swf.layer2 as swf
import json
import pytest
from garcon import decider
from garcon import activity
from tests.fixtures import decider as decider_events
def mock(monkeypatch):
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
assert d.on_exception
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
def test_get_history(monkeypatch):
"""Test the decider history
"""
mock(monkeypatch)
from tests.fixtures.flows import example
events = decider_events.history.get('events')
half = int(len(events) / 2)
events = events[:half * 2]
pool_1 = events[half:]
pool_2 = events[:half]
d = decider.DeciderWorker(example)
d.poll = MagicMock(return_value={'events': pool_2})
resp = d.get_history({'events': pool_1, 'nextPageToken': 'nextPage'})
d.poll.assert_called_with(next_page_token='nextPage')
assert len(resp) == len([
evt for evt in events if evt['eventType'].startswith('Decision')])
def test_get_activity_states(monkeypatch):
"""Test get activity states from history.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
events = decider_events.history.get('events')
d = decider.DeciderWorker(example)
history = d.get_history({'events': events})
states = d.get_activity_states(history)
for activity_name, activity_instances in states.items():
for activity_instance, activity_state in activity_instances.items():
assert isinstance(activity_state, activity.ActivityState)
def test_running_workflow(monkeypatch):
"""Test running a workflow.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
d.poll = MagicMock(return_value=decider_events.history)
d.complete = MagicMock()
d.create_decisions_from_flow = MagicMock()
# Via flow.
d.run()
assert d.create_decisions_from_flow.called
# Via custom decider
spy = MagicMock()
def custom_decider(schedule):
spy()
example.decider = custom_decider
d.run()
assert spy.called
def test_running_workflow_without_events(monkeypatch):
"""Test running a workflow without having any events.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
d.poll = MagicMock(return_value={})
d.get_history = MagicMock()
d.run()
assert not d.get_history.called
def test_schedule_context():
"""Test the schedule context.
"""
context = decider.ScheduleContext()
assert context.completed
context.mark_uncompleted()
assert not context.completed
def test_schedule_with_unscheduled_activity(monkeypatch):
"""Test the scheduling of an activity.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
monkeypatch.setattr(decider, 'schedule_activity_task', MagicMock())
decisions = MagicMock()
schedule_context = decider.ScheduleContext()
history = {}
current_activity = example.activity_1
decider.schedule(
decisions, schedule_context, history, {}, 'schedule_id',
current_activity)
assert decider.schedule_activity_task.called
assert not schedule_context.completed
def test_schedule_with_scheduled_activity(monkeypatch):
"""Test the scheduling of an activity.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
monkeypatch.setattr(decider, 'schedule_activity_task', MagicMock())
decisions = MagicMock()
schedule_context = decider.ScheduleContext()
instance_state = activity.ActivityState('activity_1')
instance_state.add_state(activity.ACTIVITY_SCHEDULED)
current_activity = example.activity_1
history = {
current_activity.name: {
'workflow_name_activity_1-1-schedule_id': instance_state
}
}
resp = decider.schedule(
decisions, schedule_context, history, {}, 'schedule_id',
current_activity)
assert not decider.schedule_activity_task.called
assert not schedule_context.completed
assert resp.get_last_state() == activity.ACTIVITY_SCHEDULED
with pytest.raises(activity.ActivityInstanceNotReadyException):
resp.result.get('foo')
def test_schedule_with_completed_activity(monkeypatch):
"""Test the scheduling of an activity.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
monkeypatch.setattr(decider, 'schedule_activity_task', MagicMock())
decisions = MagicMock()
schedule_context = decider.ScheduleContext()
instance_state = activity.ActivityState('activity_1')
instance_state.add_state(activity.ACTIVITY_COMPLETED)
current_activity = example.activity_1
history = {
current_activity.name: {
'workflow_name_activity_1-1-schedule_id': instance_state
}
}
resp = decider.schedule(
decisions, schedule_context, history, {}, 'schedule_id',
current_activity)
assert not decider.schedule_activity_task.called
assert resp.get_last_state() == activity.ACTIVITY_COMPLETED
assert schedule_context.completed
resp.result.get('foo')
def test_schedule_requires_with_incomplete_activities():
"""Test the scheduler.
"""
activity_state = activity.ActivityState('activity_name')
with pytest.raises(activity.ActivityInstanceNotReadyException):
decider.ensure_requirements([activity_state])
with pytest.raises(activity.ActivityInstanceNotReadyException):
decider.ensure_requirements([None])
activity_state.add_state(activity.ACTIVITY_COMPLETED)
decider.ensure_requirements(requires=[activity_state])
def test_schedule_activity_task(monkeypatch):
"""Test scheduling an activity task.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
instance = list(example.activity_1.instances({}))[0]
decisions = MagicMock()
decider.schedule_activity_task(decisions, instance)
decisions.schedule_activity_task.assert_called_with(
instance.id,
instance.activity_name,
'1.0',
task_list=instance.activity_worker.task_list,
input=json.dumps(instance.create_execution_input()),
heartbeat_timeout=str(instance.heartbeat_timeout),
start_to_close_timeout=str(instance.timeout),
schedule_to_start_timeout=str(instance.schedule_to_start),
schedule_to_close_timeout=str(instance.schedule_to_close))
def test_schedule_activity_task_with_version(monkeypatch):
"""Test scheduling an activity task with a version.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
instance = list(example.activity_1.instances({}))[0]
decisions = MagicMock()
version = '2.0'
decider.schedule_activity_task(decisions, instance, version=version)
decisions.schedule_activity_task.assert_called_with(
instance.id,
instance.activity_name,
version,
task_list=instance.activity_worker.task_list,
input=json.dumps(instance.create_execution_input()),
heartbeat_timeout=str(instance.heartbeat_timeout),
start_to_close_timeout=str(instance.timeout),
schedule_to_start_timeout=str(instance.schedule_to_start),
schedule_to_close_timeout=str(instance.schedule_to_close))
def test_schedule_activity_task_with_custom_id(monkeypatch):
"""Test scheduling an activity task with a custom id.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
instance = list(example.activity_1.instances({}))[0]
decisions = MagicMock()
custom_id = 'special_id'
decider.schedule_activity_task(decisions, instance, id=custom_id)
decisions.schedule_activity_task.assert_called_with(
custom_id,
instance.activity_name,
'1.0',
task_list=instance.activity_worker.task_list,
input=json.dumps(instance.create_execution_input()),
heartbeat_timeout=str(instance.heartbeat_timeout),
start_to_close_timeout=str(instance.timeout),
schedule_to_start_timeout=str(instance.schedule_to_start),
schedule_to_close_timeout=str(instance.schedule_to_close))
| mit | 1,234,588,957,725,519,400 | 29.169435 | 78 | 0.695078 | false |
seikichi/tuitwi | tuitwi/updater.py | 1 | 10173 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import curses
import tweepy
import tweepy.parsers
import threading
import string
import re
import locale
import const
# A thread that performs the update work, and another thread that tells it to update at
# fixed intervals. Commands put on the queue take the form
#   update: ('GetFriendsTimeline',)
#   post:   ('PostUpdate', text, reply_id)
# i.e. element [0] is the python-twitter method name and the remaining elements are its
# args (except that the since_id for the update-style calls is kept on this side).
# The one exception is ('Quit',), which tells the worker to terminate.
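# For example (illustrative values only):
#     queue.put(('PostUpdate', u'hello world', None))  # tweet with no in-reply-to id
#     queue.put(('DestroyStatus', status_id))          # delete a status by id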
class Updater(threading.Thread):
    '''Put update jobs on the job queue at fixed intervals.'''
def __init__(self, queue, conf):
self._queue = queue
self._stopevent = threading.Event()
self._sleepperiod = conf.get('options', {}).get('update_interval', 60)
self._dm_reply_interval = conf.get('options', {}).get('reply_check_interval', 20)
self._count = self._dm_reply_interval
threading.Thread.__init__(self)
def run(self):
        '''Main loop.'''
while not self._stopevent.isSet():
            # Queue a timeline update
self._queue.put(("GetFriendsTimeline",))
self._count += 1
if self._count >= self._dm_reply_interval:
                # Once the check interval has elapsed, queue DM and reply updates
self._count = 0
# self._queue.put(("GetDirectMessages",))
self._queue.put(("GetReplies",))
self._stopevent.wait(self._sleepperiod)
def join(self, timeout=None):
        '''Stop the thread and wait for it to finish.'''
self._stopevent.set()
threading.Thread.join(self, timeout)
class TwitterCommunicator(threading.Thread):
    '''Perform updates and other work based on the job queue.'''
def __init__(self, queue, form, lock, conf):
self._queue = queue
self._form = form
self._lock = lock
self._conf = conf
self._stopevent = threading.Event()
self._since_id = 0
self._rpl_since_id = 0
self._dm_since_id = 0
tkn = tweepy.oauth.OAuthToken(self._conf['access_token']['key'],
self._conf['access_token']['secret'])
oauth_auth = tweepy.OAuthHandler(const.CONSUMER_KEY, const.CONSUMER_SECRET)
oauth_auth.access_token = tkn
self._api = tweepy.API(oauth_auth)
self._funcs = {}
self._funcs['GetFriendsTimeline'] = self._GetFriendsTimeline
self._funcs['GetDirectMessages'] = self._GetDirectMessages
self._funcs['GetReplies'] = self._GetReplies
self._funcs['PostUpdate'] = self._PostUpdate
self._funcs['DestroyStatus'] = self._DestroyStatus
self._funcs['CreateFavorite'] = self._CreateFavorite
self._funcs['DestroyFavorite'] = self._DestroyFavorite
self._funcs['OfficialRT'] = self._OfficialRT
self._funcs['Quit'] = self._Quit
threading.Thread.__init__(self)
def run(self):
while not self._stopevent.isSet():
job = self._queue.get()
self._funcs[job[0]](job[1:])
def join(self, timeout=None):
        '''Stop the thread and wait for it to finish.'''
self._stopevent.set()
        # Once stopevent is set, the thread exits after get() returns one more time.
        # Put a dummy command on the queue in case it is empty.
self._queue.put(("Quit",))
threading.Thread.join(self, timeout)
def _translateTimeline(self, timeline):
        '''Replace newlines/whitespace with spaces and work around CP932-related character issues.'''
def translate(text):
text = re.sub(u'('+u'|'.join(string.whitespace)+u')',
u' ',
text)
text = text.replace(u'<', u'<')
text = text.replace(u'>', u'>')
text = text.replace(u'\u2015', u'\u2014')
text = text.replace(u'\uff5e', u'\u301c')
text = text.replace(u'\uff0d', u'\u2212')
text = text.replace(u'\u2225', u'\u2016')
text = text.replace(u'\uffe2', u'\u00ac')
text = text.replace(u'\uffe1', u'\u00a3')
text = text.replace(u'\uffe0', u'\u00a2')
return text
for status in timeline:
status.text = translate(status.text)
status.user.name = translate(status.user.name)
    # Update-related methods follow.
    # The flow is: 1. update inside a try block, 2. acquire the lock and update inside a
    # try block, 3. release the lock.
def _GetFriendsTimeline(self, args):
        '''Fetch the timeline.'''
try:
if self._since_id:
timeline = self._api.friends_timeline(since_id = self._since_id)
else:
timeline = self._api.friends_timeline(count=200)
msg = u'TLの取得に成功しました'
except Exception, e:
msg = u'TLの取得に失敗しました'
timeline = []
self._translateTimeline(timeline)
self._lock.acquire()
try:
if timeline:
self._form.controls['view_tab'].update_timeline(timeline)
self._since_id = timeline[0].id
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _OfficialRT(self, args):
try:
self._api.retweet(id=args[0])
msg = u'公式RTに成功しました'
except Exception, e:
msg = u'公式RTに失敗しました'
self._lock.acquire()
try:
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _GetReplies(self, args):
        '''Fetch replies.'''
try:
if self._rpl_since_id:
timeline = self._api.mentions(since_id=self._rpl_since_id)
else:
timeline = self._api.mentions()
msg = u'Replyの取得に成功しました'
except Exception, e:
msg = u'Replyの取得に失敗しました'
timeline = []
self._translateTimeline(timeline)
self._lock.acquire()
try:
if timeline:
self._form.controls['view_tab'].update_replies(timeline)
self._rpl_since_id = timeline[0].id
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _GetDirectMessages(self, args):
        '''Fetch direct messages.'''
try:
if self._dm_since_id:
timeline = self._api.direct_messages(since_id=self._dm_since_id)
else:
timeline = self._api.direct_messages()
msg = u'DMの取得に成功しました'
except Exception, e:
msg = u'DMの取得に失敗しました'
timeline = []
self._translateTimeline(timeline)
self._lock.acquire()
try:
if timeline:
self._form.controls['view_tab'].update_directmessages(timeline)
self._dm_since_id = timeline[0].id
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _PostUpdate(self, args):
        '''Post a status update.'''
text = args[0]
reply_id = args[1]
try:
status = self._api.update_status(text.encode('utf-8'), reply_id)
msg = u'Postに成功しました'
except Exception, e:
status = None
msg = u'Postに失敗しました'
self._lock.acquire()
try:
if status is not None:
timeline = [status]
self._translateTimeline(timeline)
self._form.controls['view_tab'].update_timeline(timeline)
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _CreateFavorite(self, args):
status = args[0]
try:
st = self._api.create_favorite(status.id)
msg = u'favに成功しました'
except Exception, e:
st = None
msg = u'favに失敗しました'
self._lock.acquire()
try:
if st is not None:
status.favorited = True
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _DestroyFavorite(self, args):
status = args[0]
try:
st = self._api.destroy_favorite(status.id)
msg = u'fav削除に成功しました'
except Exception, e:
st = None
msg = u'fav削除に失敗しました'
self._lock.acquire()
try:
if st is not None:
status.favorited = False
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _DestroyStatus(self, args):
        '''Delete a status.'''
deleted = False
try:
self._api.destroy_status(args[0])
msg = u'削除に成功しました'
deleted = True
except Exception, e:
msg = u'削除に失敗しました'
self._lock.acquire()
try:
if deleted:
for win in self._form.controls['view_tab'].wins:
win['win'].delete(args[0])
self._form.controls['status_line'].text = msg
self._form.draw()
curses.doupdate()
finally:
self._lock.release()
def _Quit(self, arg):
pass
| mit | -8,544,426,428,183,973,000 | 29.363934 | 89 | 0.527157 | false |
chromium/chromium | third_party/blink/tools/blinkpy/common/system/log_utils_unittest.py | 7 | 4838 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest
from blinkpy.common.system.log_testing import TestLogStream
from blinkpy.common.system.log_utils import configure_logging
class ConfigureLoggingTestBase(unittest.TestCase):
"""Base class for configure_logging() unit tests."""
def _logging_level(self):
raise Exception('Not implemented.')
def setUp(self):
log_stream = TestLogStream(self)
# Use a logger other than the root logger or one prefixed with
# "blinkpy." so as not to conflict with run_blinkpy_tests.py logging.
logger = logging.getLogger('unittest')
# Configure the test logger not to pass messages along to the
# root logger. This prevents test messages from being
# propagated to loggers used by run_blinkpy_tests.py logging (e.g.
# the root logger).
logger.propagate = False
logging_level = self._logging_level()
self._handlers = configure_logging(
logging_level=logging_level,
logger=logger,
stream=log_stream,
include_time=False)
self._log = logger
self._log_stream = log_stream
def tearDown(self):
"""Reset logging to its original state.
This method ensures that the logging configuration set up
for a unit test does not affect logging in other unit tests.
"""
logger = self._log
for handler in self._handlers:
logger.removeHandler(handler)
def _assert_log_messages(self, messages):
"""Assert that the logged messages equal the given messages."""
self._log_stream.assertMessages(messages)
class ConfigureLoggingTest(ConfigureLoggingTestBase):
"""Tests configure_logging() with the default logging level."""
def _logging_level(self):
return None
def test_info_message(self):
self._log.info('test message')
self._assert_log_messages(['test message\n'])
def test_debug_message(self):
self._log.debug('test message')
self._assert_log_messages([])
def test_below_threshold_message(self):
# We test the boundary case of a logging level equal to 19.
# In practice, we will probably only be calling log.debug(),
# which corresponds to a logging level of 10.
level = logging.INFO - 1 # Equals 19.
self._log.log(level, 'test message')
self._assert_log_messages([])
def test_two_messages(self):
self._log.info('message1')
self._log.info('message2')
self._assert_log_messages(['message1\n', 'message2\n'])
class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase):
def _logging_level(self):
return logging.DEBUG
def test_info_message(self):
self._log.info('test message')
self._assert_log_messages(['unittest: [INFO] test message\n'])
def test_debug_message(self):
self._log.debug('test message')
self._assert_log_messages(['unittest: [DEBUG] test message\n'])
class ConfigureLoggingCustomLevelTest(ConfigureLoggingTestBase):
"""Tests configure_logging() with a custom logging level."""
_level = 36
def _logging_level(self):
return self._level
def test_logged_message(self):
self._log.log(self._level, 'test message')
self._assert_log_messages(['test message\n'])
def test_below_threshold_message(self):
self._log.log(self._level - 1, 'test message')
self._assert_log_messages([])
| bsd-3-clause | 5,451,139,174,148,286,000 | 36.796875 | 79 | 0.684787 | false |
bfelbo/deepmoji | scripts/analyze_all_results.py | 2 | 1221 | from __future__ import print_function
# allow us to import the codebase/keras directory
import sys
import glob
import numpy as np
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
DATASETS = ['SE0714', 'Olympic', 'PsychExp', 'SS-Twitter', 'SS-Youtube',
'SCv1', 'SV2-GEN'] # 'SE1604' excluded due to Twitter's ToS
def get_results(dset):
METHOD = 'last'
RESULTS_DIR = 'results/'
RESULT_PATHS = glob.glob('{}/{}_{}_*_results.txt'.format(RESULTS_DIR, dset, METHOD))
assert len(RESULT_PATHS)
scores = []
for path in RESULT_PATHS:
with open(path) as f:
score = f.readline().split(':')[1]
scores.append(float(score))
average = np.mean(scores)
maximum = max(scores)
minimum = min(scores)
std = np.std(scores)
print('Dataset: {}'.format(dset))
print('Method: {}'.format(METHOD))
print('Number of results: {}'.format(len(scores)))
print('--------------------------')
print('Average: {}'.format(average))
print('Maximum: {}'.format(maximum))
print('Minimum: {}'.format(minimum))
print('Standard deviaton: {}'.format(std))
for dset in DATASETS:
get_results(dset)
| mit | 7,870,647,236,632,249,000 | 28.071429 | 88 | 0.611794 | false |
loop1024/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_marshal.py | 29 | 10569 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from test import test_support
import marshal
import sys
import unittest
import os
class IntTestCase(unittest.TestCase):
def test_ints(self):
# Test the full range of Python ints.
n = sys.maxint
while n:
for expected in (-n, n):
s = marshal.dumps(expected)
got = marshal.loads(s)
self.assertEqual(expected, got)
marshal.dump(expected, file(test_support.TESTFN, "wb"))
got = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(expected, got)
n = n >> 1
os.unlink(test_support.TESTFN)
def test_int64(self):
# Simulate int marshaling on a 64-bit box. This is most interesting if
# we're running the test on a 32-bit box, of course.
def to_little_endian_string(value, nbytes):
bytes = []
for i in range(nbytes):
bytes.append(chr(value & 0xff))
value >>= 8
return ''.join(bytes)
maxint64 = (1L << 63) - 1
minint64 = -maxint64-1
for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
while base:
s = 'I' + to_little_endian_string(base, 8)
got = marshal.loads(s)
self.assertEqual(base, got)
if base == -1: # a fixed-point for shifting right 1
base = 0
else:
base >>= 1
def test_bool(self):
for b in (True, False):
new = marshal.loads(marshal.dumps(b))
self.assertEqual(b, new)
self.assertEqual(type(b), type(new))
marshal.dump(b, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(b, new)
self.assertEqual(type(b), type(new))
class FloatTestCase(unittest.TestCase):
def test_floats(self):
# Test a few floats
small = 1e-25
n = sys.maxint * 3.7e250
while n > small:
for expected in (-n, n):
f = float(expected)
s = marshal.dumps(f)
got = marshal.loads(s)
self.assertEqual(f, got)
marshal.dump(f, file(test_support.TESTFN, "wb"))
got = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(f, got)
n /= 123.4567
f = 0.0
s = marshal.dumps(f, 2)
got = marshal.loads(s)
self.assertEqual(f, got)
# and with version <= 1 (floats marshalled differently then)
s = marshal.dumps(f, 1)
got = marshal.loads(s)
self.assertEqual(f, got)
n = sys.maxint * 3.7e-250
while n < small:
for expected in (-n, n):
f = float(expected)
s = marshal.dumps(f)
got = marshal.loads(s)
self.assertEqual(f, got)
s = marshal.dumps(f, 1)
got = marshal.loads(s)
self.assertEqual(f, got)
marshal.dump(f, file(test_support.TESTFN, "wb"))
got = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(f, got)
marshal.dump(f, file(test_support.TESTFN, "wb"), 1)
got = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(f, got)
n *= 123.4567
os.unlink(test_support.TESTFN)
class StringTestCase(unittest.TestCase):
def test_unicode(self):
for s in [u"", u"Andrè Previn", u"abc", u" "*10000]:
new = marshal.loads(marshal.dumps(s))
self.assertEqual(s, new)
self.assertEqual(type(s), type(new))
marshal.dump(s, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(s, new)
self.assertEqual(type(s), type(new))
os.unlink(test_support.TESTFN)
def test_string(self):
for s in ["", "Andrè Previn", "abc", " "*10000]:
new = marshal.loads(marshal.dumps(s))
self.assertEqual(s, new)
self.assertEqual(type(s), type(new))
marshal.dump(s, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(s, new)
self.assertEqual(type(s), type(new))
os.unlink(test_support.TESTFN)
def test_buffer(self):
for s in ["", "Andrè Previn", "abc", " "*10000]:
with test_support.check_py3k_warnings(("buffer.. not supported",
DeprecationWarning)):
b = buffer(s)
new = marshal.loads(marshal.dumps(b))
self.assertEqual(s, new)
marshal.dump(b, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(s, new)
os.unlink(test_support.TESTFN)
class ExceptionTestCase(unittest.TestCase):
def test_exceptions(self):
new = marshal.loads(marshal.dumps(StopIteration))
self.assertEqual(StopIteration, new)
class CodeTestCase(unittest.TestCase):
def test_code(self):
co = ExceptionTestCase.test_exceptions.func_code
new = marshal.loads(marshal.dumps(co))
self.assertEqual(co, new)
class ContainerTestCase(unittest.TestCase):
d = {'astring': '[email protected]',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'alist': ['.zyx.41'],
'atuple': ('.zyx.41',)*10,
'aboolean': False,
'aunicode': u"Andrè Previn"
}
def test_dict(self):
new = marshal.loads(marshal.dumps(self.d))
self.assertEqual(self.d, new)
marshal.dump(self.d, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(self.d, new)
os.unlink(test_support.TESTFN)
def test_list(self):
lst = self.d.items()
new = marshal.loads(marshal.dumps(lst))
self.assertEqual(lst, new)
marshal.dump(lst, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(lst, new)
os.unlink(test_support.TESTFN)
def test_tuple(self):
t = tuple(self.d.keys())
new = marshal.loads(marshal.dumps(t))
self.assertEqual(t, new)
marshal.dump(t, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(t, new)
os.unlink(test_support.TESTFN)
def test_sets(self):
for constructor in (set, frozenset):
t = constructor(self.d.keys())
new = marshal.loads(marshal.dumps(t))
self.assertEqual(t, new)
self.assertTrue(isinstance(new, constructor))
self.assertNotEqual(id(t), id(new))
marshal.dump(t, file(test_support.TESTFN, "wb"))
new = marshal.load(file(test_support.TESTFN, "rb"))
self.assertEqual(t, new)
os.unlink(test_support.TESTFN)
class BugsTestCase(unittest.TestCase):
def test_bug_5888452(self):
# Simple-minded check for SF 588452: Debug build crashes
marshal.dumps([128] * 1000)
def test_patch_873224(self):
self.assertRaises(Exception, marshal.loads, '0')
self.assertRaises(Exception, marshal.loads, 'f')
self.assertRaises(Exception, marshal.loads, marshal.dumps(5L)[:-1])
def test_version_argument(self):
# Python 2.4.0 crashes for any call to marshal.dumps(x, y)
self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5)
self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5)
def test_fuzz(self):
# simple test that it's at least not *totally* trivial to
# crash from bad marshal data
for c in [chr(i) for i in range(256)]:
try:
marshal.loads(c)
except Exception:
pass
def test_loads_recursion(self):
s = 'c' + ('X' * 4*4) + '{' * 2**20
self.assertRaises(ValueError, marshal.loads, s)
def test_recursion_limit(self):
# Create a deeply nested structure.
head = last = []
# The max stack depth should match the value in Python/marshal.c.
MAX_MARSHAL_STACK_DEPTH = 2000
for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
last.append([0])
last = last[-1]
# Verify we don't blow out the stack with dumps/load.
data = marshal.dumps(head)
new_head = marshal.loads(data)
# Don't use == to compare objects, it can exceed the recursion limit.
self.assertEqual(len(new_head), len(head))
self.assertEqual(len(new_head[0]), len(head[0]))
self.assertEqual(len(new_head[-1]), len(head[-1]))
last.append([0])
self.assertRaises(ValueError, marshal.dumps, head)
def test_exact_type_match(self):
# Former bug:
# >>> class Int(int): pass
# >>> type(loads(dumps(Int())))
# <type 'int'>
for typ in (int, long, float, complex, tuple, list, dict, set, frozenset):
# Note: str and unicode subclasses are not tested because they get handled
# by marshal's routines for objects supporting the buffer API.
subtyp = type('subtyp', (typ,), {})
self.assertRaises(ValueError, marshal.dumps, subtyp())
# Issue #1792 introduced a change in how marshal increases the size of its
# internal buffer; this test ensures that the new code is exercised.
def test_large_marshal(self):
size = int(1e6)
testString = 'abc' * size
marshal.dumps(testString)
def test_invalid_longs(self):
# Issue #7019: marshal.loads shouldn't produce unnormalized PyLongs
invalid_string = 'l\x02\x00\x00\x00\x00\x00\x00\x00'
self.assertRaises(ValueError, marshal.loads, invalid_string)
def test_main():
test_support.run_unittest(IntTestCase,
FloatTestCase,
StringTestCase,
CodeTestCase,
ContainerTestCase,
ExceptionTestCase,
BugsTestCase)
if __name__ == "__main__":
test_main()
| mit | 3,207,952,097,580,844,000 | 36.34629 | 86 | 0.554357 | false |
jainayush975/zulip | zerver/views/realm.py | 1 | 7084 | from __future__ import absolute_import
from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import require_realm_admin, to_non_negative_int
from zerver.lib.actions import (
do_set_realm_message_editing,
do_set_realm_authentication_methods,
do_set_realm_property,
)
from zerver.lib.i18n import get_available_language_codes
from zerver.lib.request import has_request_variables, REQ, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_string, check_dict, check_bool
from zerver.models import UserProfile
@require_realm_admin
@has_request_variables
def update_realm(request, user_profile, name=REQ(validator=check_string, default=None),
description=REQ(validator=check_string, default=None),
restricted_to_domain=REQ(validator=check_bool, default=None),
invite_required=REQ(validator=check_bool, default=None),
invite_by_admins_only=REQ(validator=check_bool, default=None),
name_changes_disabled=REQ(validator=check_bool, default=None),
email_changes_disabled=REQ(validator=check_bool, default=None),
inline_image_preview=REQ(validator=check_bool, default=None),
inline_url_embed_preview=REQ(validator=check_bool, default=None),
create_stream_by_admins_only=REQ(validator=check_bool, default=None),
add_emoji_by_admins_only=REQ(validator=check_bool, default=None),
allow_message_editing=REQ(validator=check_bool, default=None),
message_content_edit_limit_seconds=REQ(converter=to_non_negative_int, default=None),
default_language=REQ(validator=check_string, default=None),
waiting_period_threshold=REQ(converter=to_non_negative_int, default=None),
authentication_methods=REQ(validator=check_dict([]), default=None)):
# type: (HttpRequest, UserProfile, Optional[str], Optional[str], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[bool], Optional[int], Optional[str], Optional[int], Optional[dict]) -> HttpResponse
# Validation for default_language
if default_language is not None and default_language not in get_available_language_codes():
raise JsonableError(_("Invalid language '%s'" % (default_language,)))
realm = user_profile.realm
data = {} # type: Dict[str, Any]
if name is not None and realm.name != name:
do_set_realm_property(realm, 'name', name)
data['name'] = 'updated'
if description is not None and realm.description != description:
if len(description) > 100:
return json_error(_("Realm description cannot exceed 100 characters."))
do_set_realm_property(realm, 'description', description)
data['description'] = 'updated'
if restricted_to_domain is not None and realm.restricted_to_domain != restricted_to_domain:
do_set_realm_property(realm, 'restricted_to_domain', restricted_to_domain)
data['restricted_to_domain'] = restricted_to_domain
if invite_required is not None and realm.invite_required != invite_required:
do_set_realm_property(realm, 'invite_required', invite_required)
data['invite_required'] = invite_required
if invite_by_admins_only is not None and realm.invite_by_admins_only != invite_by_admins_only:
do_set_realm_property(realm, 'invite_by_admins_only', invite_by_admins_only)
data['invite_by_admins_only'] = invite_by_admins_only
if name_changes_disabled is not None and realm.name_changes_disabled != name_changes_disabled:
do_set_realm_property(realm, 'name_changes_disabled', name_changes_disabled)
data['name_changes_disabled'] = name_changes_disabled
if email_changes_disabled is not None and realm.email_changes_disabled != email_changes_disabled:
do_set_realm_property(realm, 'email_changes_disabled', email_changes_disabled)
data['email_changes_disabled'] = email_changes_disabled
if inline_image_preview is not None and realm.inline_image_preview != inline_image_preview:
do_set_realm_property(realm, 'inline_image_preview', inline_image_preview)
data['inline_image_preview'] = inline_image_preview
if inline_url_embed_preview is not None and realm.inline_url_embed_preview != inline_url_embed_preview:
do_set_realm_property(realm, 'inline_url_embed_preview', inline_url_embed_preview)
data['inline_url_embed_preview'] = inline_url_embed_preview
if authentication_methods is not None and realm.authentication_methods_dict() != authentication_methods:
if True not in list(authentication_methods.values()):
return json_error(_("At least one authentication method must be enabled."),
data={"reason": "no authentication"}, status=403)
else:
do_set_realm_authentication_methods(realm, authentication_methods)
data['authentication_methods'] = authentication_methods
if create_stream_by_admins_only is not None and realm.create_stream_by_admins_only != create_stream_by_admins_only:
do_set_realm_property(realm, 'create_stream_by_admins_only', create_stream_by_admins_only)
data['create_stream_by_admins_only'] = create_stream_by_admins_only
if add_emoji_by_admins_only is not None and realm.add_emoji_by_admins_only != add_emoji_by_admins_only:
do_set_realm_property(realm, 'add_emoji_by_admins_only', add_emoji_by_admins_only)
data['add_emoji_by_admins_only'] = add_emoji_by_admins_only
if (allow_message_editing is not None and realm.allow_message_editing != allow_message_editing) or \
(message_content_edit_limit_seconds is not None and
realm.message_content_edit_limit_seconds != message_content_edit_limit_seconds):
if allow_message_editing is None:
allow_message_editing = realm.allow_message_editing
if message_content_edit_limit_seconds is None:
message_content_edit_limit_seconds = realm.message_content_edit_limit_seconds
do_set_realm_message_editing(realm, allow_message_editing, message_content_edit_limit_seconds)
data['allow_message_editing'] = allow_message_editing
data['message_content_edit_limit_seconds'] = message_content_edit_limit_seconds
if default_language is not None and realm.default_language != default_language:
do_set_realm_property(realm, 'default_language', default_language)
data['default_language'] = default_language
if waiting_period_threshold is not None and realm.waiting_period_threshold != waiting_period_threshold:
do_set_realm_property(realm, 'waiting_period_threshold', waiting_period_threshold)
data['waiting_period_threshold'] = waiting_period_threshold
return json_success(data)
| apache-2.0 | 7,669,482,938,933,022,000 | 68.45098 | 305 | 0.704263 | false |
Brett55/moto | docs/conf.py | 12 | 9155 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Moto documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 26 22:16:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Moto'
copyright = '2015, Steve Pulec'
author = 'Steve Pulec'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4.10'
# The full version, including alpha/beta/rc tags.
release = '0.4.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Motodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Moto.tex', 'Moto Documentation',
'Steve Pulec', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'moto', 'Moto Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Moto', 'Moto Documentation',
author, 'Moto', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | -6,769,149,620,291,133,000 | 31.122807 | 79 | 0.705844 | false |
AICP/external_chromium_org | tools/memory_inspector/memory_inspector/core/native_heap.py | 17 | 1798 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
class NativeHeap(object):
"""A snapshot of outstanding (i.e. not freed) native allocations.
This is typically obtained by calling |backends.Process|.DumpNativeHeap()
"""
def __init__(self):
self.allocations = []
self.stack_frames = {} # absolute_address (int) -> |stacktrace.Frame|.
def Add(self, allocation):
assert(isinstance(allocation, Allocation))
self.allocations += [allocation]
def GetStackFrame(self, absolute_addr):
assert(isinstance(absolute_addr, int))
stack_frame = self.stack_frames.get(absolute_addr)
if not stack_frame:
stack_frame = stacktrace.Frame(absolute_addr)
self.stack_frames[absolute_addr] = stack_frame
return stack_frame
def SymbolizeUsingSymbolDB(self, symbols):
assert(isinstance(symbols, symbol.Symbols))
for stack_frame in self.stack_frames.itervalues():
sym = symbols.Lookup(stack_frame.exec_file_rel_path, stack_frame.offset)
if sym:
stack_frame.SetSymbolInfo(sym)
class Allocation(object):
"""A Native allocation, modeled in a size*count fashion.
|count| is the number of identical stack_traces which performed the allocation
of |size| bytes.
"""
def __init__(self, size, count, stack_trace):
assert(isinstance(stack_trace, stacktrace.Stacktrace))
self.size = size # in bytes.
self.count = count
self.stack_trace = stack_trace
@property
def total_size(self):
return self.size * self.count
def __str__(self):
return '%d x %d : %s' % (self.count, self.size, self.stack_trace)
| bsd-3-clause | 4,053,535,745,794,699,000 | 30.54386 | 80 | 0.703003 | false |
ghjm/ansible | lib/ansible/vars/clean.py | 29 | 6423 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils import six
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
from ansible.plugins.loader import connection_loader
from ansible.utils.display import Display
display = Display()
def module_response_deepcopy(v):
"""Function to create a deep copy of module response data
Designed to be used within the Ansible "engine" to improve performance
issues where ``copy.deepcopy`` was used previously, largely with CPU
and memory contention.
This only supports the following data types, and was designed to only
handle specific workloads:
* ``dict``
* ``list``
The data we pass here will come from a serialization such
as JSON, so we shouldn't have need for other data types such as
``set`` or ``tuple``.
Take note that this function should not be used extensively as a
replacement for ``deepcopy`` due to the naive way in which this
handles other data types.
Do not expect uses outside of those listed below to maintain
backwards compatibility, in case we need to extend this function
to handle our specific needs:
* ``ansible.executor.task_result.TaskResult.clean_copy``
* ``ansible.vars.clean.clean_facts``
* ``ansible.vars.namespace_facts``
"""
if isinstance(v, dict):
ret = v.copy()
items = six.iteritems(ret)
elif isinstance(v, list):
ret = v[:]
items = enumerate(ret)
else:
return v
for key, value in items:
if isinstance(value, (dict, list)):
ret[key] = module_response_deepcopy(value)
else:
ret[key] = value
return ret
def strip_internal_keys(dirty, exceptions=None):
# All keys starting with _ansible_ are internal, so change the 'dirty' mapping and remove them.
if exceptions is None:
exceptions = tuple()
if isinstance(dirty, MutableSequence):
for element in dirty:
if isinstance(element, (MutableMapping, MutableSequence)):
strip_internal_keys(element, exceptions=exceptions)
elif isinstance(dirty, MutableMapping):
# listify to avoid updating dict while iterating over it
for k in list(dirty.keys()):
if isinstance(k, six.string_types):
if k.startswith('_ansible_') and k not in exceptions:
del dirty[k]
continue
if isinstance(dirty[k], (MutableMapping, MutableSequence)):
strip_internal_keys(dirty[k], exceptions=exceptions)
else:
raise AnsibleError("Cannot strip invalid keys from %s" % type(dirty))
return dirty
def remove_internal_keys(data):
'''
More nuanced version of strip_internal_keys
'''
for key in list(data.keys()):
if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
del data[key]
# remove bad/empty internal keys
for key in ['warnings', 'deprecations']:
if key in data and not data[key]:
del data[key]
# cleanse fact values that are allowed from actions but not modules
for key in list(data.get('ansible_facts', {}).keys()):
if key.startswith('discovered_interpreter_') or key.startswith('ansible_discovered_interpreter_'):
del data['ansible_facts'][key]
def clean_facts(facts):
''' remove facts that can override internal keys or otherwise deemed unsafe '''
data = module_response_deepcopy(facts)
remove_keys = set()
fact_keys = set(data.keys())
# first we add all of our magic variable names to the set of
# keys we want to remove from facts
# NOTE: these will eventually disappear in favor of others below
for magic_var in C.MAGIC_VARIABLE_MAPPING:
remove_keys.update(fact_keys.intersection(C.MAGIC_VARIABLE_MAPPING[magic_var]))
# remove common connection vars
remove_keys.update(fact_keys.intersection(C.COMMON_CONNECTION_VARS))
# next we remove any connection plugin specific vars
for conn_path in connection_loader.all(path_only=True):
conn_name = os.path.splitext(os.path.basename(conn_path))[0]
re_key = re.compile('^ansible_%s_' % conn_name)
for fact_key in fact_keys:
# most lightweight VM or container tech creates devices with this pattern, this avoids filtering them out
if (re_key.match(fact_key) and not fact_key.endswith(('_bridge', '_gwbridge'))) or fact_key.startswith('ansible_become_'):
remove_keys.add(fact_key)
# remove some KNOWN keys
for hard in C.RESTRICTED_RESULT_KEYS + C.INTERNAL_RESULT_KEYS:
if hard in fact_keys:
remove_keys.add(hard)
# finally, we search for interpreter keys to remove
re_interp = re.compile('^ansible_.*_interpreter$')
for fact_key in fact_keys:
if re_interp.match(fact_key):
remove_keys.add(fact_key)
# then we remove them (except for ssh host keys)
for r_key in remove_keys:
if not r_key.startswith('ansible_ssh_host_key_'):
try:
r_val = to_text(data[r_key])
if len(r_val) > 24:
r_val = '%s ... %s' % (r_val[:13], r_val[-6:])
except Exception:
r_val = ' <failed to convert value to a string> '
display.warning("Removed restricted key from module data: %s = %s" % (r_key, r_val))
del data[r_key]
return strip_internal_keys(data)
def namespace_facts(facts):
''' return all facts inside 'ansible_facts' w/o an ansible_ prefix '''
deprefixed = {}
for k in facts:
if k.startswith('ansible_') and k not in ('ansible_local',):
deprefixed[k[8:]] = module_response_deepcopy(facts[k])
else:
deprefixed[k] = module_response_deepcopy(facts[k])
return {'ansible_facts': deprefixed}
| gpl-3.0 | 6,687,968,816,079,650,000 | 35.494318 | 134 | 0.649074 | false |
DimStar77/osc | tests/test_deletefiles.py | 15 | 8629 | import osc.core
import osc.oscerr
import os
from common import OscTestCase
FIXTURES_DIR = os.path.join(os.getcwd(), 'deletefile_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestDeleteFiles)
class TestDeleteFiles(OscTestCase):
def _get_fixtures_dir(self):
return FIXTURES_DIR
def testSimpleRemove(self):
"""delete a file ('foo') from the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('foo')
self.__check_ret(ret, True, ' ')
self.assertFalse(os.path.exists('foo'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
def testDeleteModified(self):
"""delete modified file ('nochange') from the wc (without force)"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('nochange')
self.__check_ret(ret, False, 'M')
self.assertTrue(os.path.exists('nochange'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'nochange')))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_status(p, 'nochange', 'M')
def testDeleteUnversioned(self):
"""delete an unversioned file ('toadd2') from the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('toadd2')
self.__check_ret(ret, False, '?')
self.assertTrue(os.path.exists('toadd2'))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_status(p, 'toadd2', '?')
def testDeleteAdded(self):
"""delete an added file ('toadd1') from the wc (without force)"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('toadd1')
self.__check_ret(ret, False, 'A')
self.assertTrue(os.path.exists('toadd1'))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_status(p, 'toadd1', 'A')
def testDeleteReplaced(self):
"""delete an added file ('merge') from the wc (without force)"""
self._change_to_pkg('replace')
p = osc.core.Package('.')
ret = p.delete_file('merge')
self.__check_ret(ret, False, 'R')
self.assertTrue(os.path.exists('merge'))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_addlist('toadd1\nmerge\n')
self._check_status(p, 'merge', 'R')
def testDeleteConflict(self):
"""delete a file ('foo', state='C') from the wc (without force)"""
self._change_to_pkg('conflict')
p = osc.core.Package('.')
ret = p.delete_file('foo')
self.__check_ret(ret, False, 'C')
self.assertTrue(os.path.exists('foo'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_conflictlist('foo\n')
self._check_status(p, 'foo', 'C')
def testDeleteModifiedForce(self):
"""force deletion modified file ('nochange') from wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('nochange', force=True)
self.__check_ret(ret, True, 'M')
self.assertFalse(os.path.exists('nochange'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'nochange')))
self._check_deletelist('nochange\n')
self._check_status(p, 'nochange', 'D')
def testDeleteUnversionedForce(self):
"""delete an unversioned file ('toadd2') from the wc (with force)"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('toadd2', force=True)
self.__check_ret(ret, True, '?')
self.assertFalse(os.path.exists('toadd2'))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self.assertRaises(osc.oscerr.OscIOError, p.status, 'toadd2')
def testDeleteAddedForce(self):
"""delete an added file ('toadd1') from the wc (with force)"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('toadd1', force=True)
self.__check_ret(ret, True, 'A')
self.assertFalse(os.path.exists('toadd1'))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_added')))
self.assertRaises(osc.oscerr.OscIOError, p.status, 'toadd1')
def testDeleteReplacedForce(self):
"""delete an added file ('merge') from the wc (with force)"""
self._change_to_pkg('replace')
p = osc.core.Package('.')
ret = p.delete_file('merge', force=True)
self.__check_ret(ret, True, 'R')
self.assertFalse(os.path.exists('merge'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'merge')))
self._check_deletelist('merge\n')
self._check_addlist('toadd1\n')
self._check_status(p, 'merge', 'D')
def testDeleteConflictForce(self):
"""delete a file ('foo', state='C') from the wc (with force)"""
self._change_to_pkg('conflict')
p = osc.core.Package('.')
ret = p.delete_file('foo', force=True)
self.__check_ret(ret, True, 'C')
self.assertFalse(os.path.exists('foo'))
self.assertTrue(os.path.exists('foo.r2'))
self.assertTrue(os.path.exists('foo.mine'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self._check_deletelist('foo\n')
self.assertFalse(os.path.exists(os.path.join('.osc', '_in_conflict')))
self._check_status(p, 'foo', 'D')
def testDeleteMultiple(self):
"""delete mutliple files from the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('foo')
self.__check_ret(ret, True, ' ')
ret = p.delete_file('merge')
self.__check_ret(ret, True, ' ')
self.assertFalse(os.path.exists('foo'))
self.assertFalse(os.path.exists('merge'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self.assertTrue(os.path.exists(os.path.join('.osc', 'merge')))
self._check_deletelist('foo\nmerge\n')
def testDeleteAlreadyDeleted(self):
"""delete already deleted file from the wc"""
self._change_to_pkg('already_deleted')
p = osc.core.Package('.')
ret = p.delete_file('foo')
self.__check_ret(ret, True, 'D')
self.assertFalse(os.path.exists('foo'))
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
def testDeleteAddedMissing(self):
"""
delete a file which was added to the wc and is removed again
(via a non osc command). It's current state is '!'
"""
self._change_to_pkg('delete')
p = osc.core.Package('.')
ret = p.delete_file('toadd1')
self.__check_ret(ret, True, '!')
self.assertFalse(os.path.exists('toadd1'))
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self._check_deletelist('foo\n')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_added')))
def testDeleteSkippedLocalNotExistent(self):
"""
delete a skipped file: no local file with that name exists
"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('skipped')
self.__check_ret(ret, False, 'S')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
def testDeleteSkippedLocalExistent(self):
"""
delete a skipped file: a local file with that name exists and will be deleted
(for instance _service:* files have status 'S' but a local files might exist)
"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
ret = p.delete_file('skipped_exists')
self.__check_ret(ret, True, 'S')
self.assertFalse(os.path.exists('skipped_exists'))
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
def __check_ret(self, ret, exp1, exp2):
self.assertTrue(len(ret) == 2)
self.assertTrue(ret[0] == exp1)
self.assertTrue(ret[1] == exp2)
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-2.0 | -981,554,177,680,868,900 | 40.68599 | 85 | 0.588365 | false |
aferr/TemporalPartitioningMemCtl | src/cpu/testers/memtest/MemTest.py | 15 | 2921 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from MemObject import MemObject
from m5.params import *
from m5.proxy import *
class MemTest(MemObject):
type = 'MemTest'
max_loads = Param.Counter(0, "number of loads to execute")
atomic = Param.Bool(False, "Execute tester in atomic mode? (or timing)\n")
memory_size = Param.Int(65536, "memory size")
percent_dest_unaligned = Param.Percent(50,
"percent of copy dest address that are unaligned")
percent_reads = Param.Percent(65, "target read percentage")
issue_dmas = Param.Bool(False, "this memtester should issue dma requests")
percent_source_unaligned = Param.Percent(50,
"percent of copy source address that are unaligned")
percent_functional = Param.Percent(50, "percent of access that are functional")
percent_uncacheable = Param.Percent(10,
"target uncacheable percentage")
progress_interval = Param.Counter(1000000,
"progress report interval (in accesses)")
trace_addr = Param.Addr(0, "address to trace")
test = MasterPort("Port to the memory system to test")
functional = MasterPort("Port to the functional memory " \
"used for verification")
suppress_func_warnings = Param.Bool(False,
"suppress warnings when functional accesses fail.\n")
sys = Param.System(Parent.any, "System Parameter")
| bsd-3-clause | 3,171,767,599,402,096,000 | 50.245614 | 83 | 0.74632 | false |
chuckcoughlin/sarah-bella | robot/src/gpio_msgs/src/GPIOConfiguration.py | 1 | 5812 | #!/usr/bin/env python
#
# Configure the GPIO ports as IN or OUT appropriately.
# This should be called on startup.
#
import rospy
import RPi.GPIO as GPIO
from gpio_msgs.msg import GPIOState, GPIOPin
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# Get warning if pins are set as outputs, but they are not.
def configure():
rospy.loginfo("Configuring GPIO pins ...")
chanlist = [3,5,8,10,13,15,19,21,22,23,24,26,29,32,33,35,36,37,38,40]
for channel in chanlist:
try:
GPIO.setup(channel,GPIO.IN)
rospy.loginfo("Setting GPIO ",channel," to IN")
except:
rospy.logwarn("ERROR: Setting GPIO ",channel," to IN")
chanlist = [7,11,12,16,18]
for channel in chanlist:
try:
GPIO.setup(channel,GPIO.OUT,initial=GPIO.LOW)
rospy.loginfo("Setting GPIO ",channel," to OUT")
except:
rospy.logwarn("ERROR: Setting GPIO ",channel," to OUT")
# Argument is a GPIOState.
def initialize(state):
count = state.PIN_COUNT
state.pins = definePins()
rospy.loginfo("Initialized state with %d pins"%(len(state.pins)))
return state.pins
def getMode(channel):
try:
mode = GPIO.gpio_function(channel)
if mode==GPIO.IN:
return "IN"
elif mode==GPIO.OUT:
return "OUT"
elif mode==GPIO.SPI:
return "SPI"
elif mode==GPIO.I2C:
return "I2C"
elif mode==GPIO.HARD_PWM:
return "HWE"
elif mode==GPIO.SERIAL:
return "SER"
elif mode==GPIO.UNKNOWN:
return "UNK"
else:
return "??"
except:
# Illegal on a Raspberry Pi
return "BAD"
def definePins():
pins = []
pin1 = GPIOPin()
pin1.label = "3.3V"
pin1.channel = 1
pin1.mode = "PWR"
pins.append(pin1)
pin2 = GPIOPin()
pin2.label = "5V"
pin2.channel = 2
pin2.mode = "PWR"
pins.append(pin2)
pin3 = GPIOPin()
pin3.label = "BCM 2"
pin3.channel = 3
pin3.mode = getMode(3)
pins.append(pin3)
pin4 = GPIOPin()
pin4.label = "5V"
pin4.channel = 4
pin4.mode = "PWR"
pins.append(pin4)
pin5 = GPIOPin()
pin5.label = "BCM 3"
pin5.channel = 5
pin5.mode = getMode(5)
pins.append(pin5)
pin6 = GPIOPin()
pin6.label = "GND"
pin6.channel = 6
pin6.mode = "GND"
pins.append(pin6)
pin7 = GPIOPin()
pin7.label = "BCM 4"
pin7.channel = 7
pin7.mode = getMode(7)
pins.append(pin7)
pin8 = GPIOPin()
pin8.label = "BCM 14"
pin8.channel = 8
pin8.mode = getMode(8)
pins.append(pin8)
pin9 = GPIOPin()
pin9.label = "GND"
pin9.channel = 9
pin9.mode = "GND"
pins.append(pin9)
pin10 = GPIOPin()
pin10.label = "BCM 15"
pin10.channel = 10
pin10.mode = getMode(10)
pins.append(pin10)
pin11 = GPIOPin()
pin11.label = "BCM 17"
pin11.channel = 11
pin11.mode = getMode(11)
pins.append(pin11)
pin12 = GPIOPin()
pin12.label = "BCM 18"
pin12.channel = 12
pin12.mode = getMode(12)
pins.append(pin12)
pin13 = GPIOPin()
pin13.label = "BCM 27"
pin13.mode = "PWR"
pin13.mode = getMode(13)
pins.append(pin13)
pin14 = GPIOPin()
pin14.label = "GND"
pin14.channel = 14
pin14.mode = "GND"
pins.append(pin14)
pin15 = GPIOPin()
pin15.label = "BCM 22"
pin15.channel = 15
pin15.mode = getMode(15)
pins.append(pin15)
pin16 = GPIOPin()
pin16.label = "BCM 23"
pin16.channel = 16
pin16.mode = getMode(16)
pins.append(pin16)
pin17 = GPIOPin()
pin17.label = "3.3V"
pin17.channel = 17
pin17.mode = "PWR"
pins.append(pin17)
pin18 = GPIOPin()
pin18.label = "BCM 24"
pin18.channel = 18
pin18.mode = getMode(18)
pins.append(pin18)
pin19 = GPIOPin()
pin19.label = "BCM 10"
pin19.channel = 19
pin19.mode = getMode(19)
pins.append(pin19)
pin20 = GPIOPin()
pin20.label = "GND"
pin20.channel = 20
pin20.mode = "GND"
pins.append(pin20)
pin21 = GPIOPin()
pin21.label = "BCM 9"
pin21.channel = 21
pin21.mode = getMode(21)
pins.append(pin21)
pin22 = GPIOPin()
pin22.label = "BCM 25"
pin22.channel = 22
pin22.mode = getMode(22)
pins.append(pin22)
pin23 = GPIOPin()
pin23.label = "BCM 11"
pin23.channel = 23
pin23.mode = getMode(23)
pins.append(pin23)
pin24 = GPIOPin()
pin24.label = "BCM 8"
pin24.channel = 24
pin24.mode = getMode(24)
pins.append(pin24)
pin25 = GPIOPin()
pin25.label = "GND"
pin25.channel = 25
pin25.mode = "GND"
pins.append(pin25)
pin26 = GPIOPin()
pin26.label = "BCM 7"
pin26.channel = 26
pin26.mode = getMode(26)
pins.append(pin26)
# Got error when tried to set this
pin27 = GPIOPin()
pin27.label = "BCM 0"
pin27.channel = 27
pin27.mode = getMode(27)
pins.append(pin27)
# Got error when tried to set this
pin28 = GPIOPin()
pin28.label = "BCM 1"
pin28.channel = 28
pin28.mode = getMode(28)
pins.append(pin28)
pin29 = GPIOPin()
pin29.label = "BCM 5"
pin29.channel = 29
pin29.mode = getMode(29)
pins.append(pin29)
pin30 = GPIOPin()
pin30.label = "GND"
pin30.channel = 30
pin30.mode = "GND"
pins.append(pin30)
pin31 = GPIOPin()
pin31.label = "BCM 6"
pin31.channel = 31
pin31.mode = getMode(31)
pins.append(pin31)
pin32 = GPIOPin()
pin32.label = "BCM 12"
pin32.channel = 32
pin32.mode = getMode(32)
pins.append(pin32)
pin33 = GPIOPin()
pin33.label = "BCM 13"
pin33.channel = 33
pin33.mode = getMode(33)
pins.append(pin33)
pin34 = GPIOPin()
pin34.label = "GND"
pin34.channel = 34
pin34.mode = "GND"
pins.append(pin34)
pin35 = GPIOPin()
pin35.label = "BCM 19"
pin35.channel = 35
pin35.mode = getMode(35)
pins.append(pin35)
pin36 = GPIOPin()
pin36.label = "BCM 16"
pin36.channel = 36
pin36.mode = getMode(36)
pins.append(pin36)
pin37 = GPIOPin()
pin37.label = "BCM 26"
pin37.channel = 37
pin37.mode = getMode(37)
pins.append(pin37)
pin38 = GPIOPin()
pin38.label = "BCM 20"
pin38.channel = 38
pin38.mode = getMode(38)
pins.append(pin38)
pin39 = GPIOPin()
pin39.label = "GND"
pin39.channel = 39
pin39.mode = "GND"
pins.append(pin39)
pin40= GPIOPin()
pin40.label = "BCM 21"
pin40.channel = 40
pin40.mode = getMode(40)
pins.append(pin40)
return pins
| mit | 3,764,982,497,583,951,400 | 17.931596 | 70 | 0.668273 | false |
image72/browserscope | static_mode/richtext_2.py | 9 | 58326 | (dp0
VOpera 9.7
p1
(dp2
S'summary_display'
p3
S'0/149'
p4
sS'total_runs'
p5
L2L
sS'summary_score'
p6
I0
sS'results'
p7
(dp8
S'unapply'
p9
(dp10
S'score'
p11
I0
sS'raw_score'
p12
I0
sS'display'
p13
S'0/26'
p14
ssS'apply'
p15
(dp16
g11
I0
sg12
I0
sg13
S'0/41'
p17
ssS'change'
p18
(dp19
g11
I0
sg12
I0
sg13
S'0/17'
p20
ssS'query'
p21
(dp22
g11
I0
sg12
I0
sg13
S'0/65'
p23
ssssVOpera 9.6
p24
(dp25
S'summary_display'
p26
S'0/149'
p27
sS'total_runs'
p28
L1L
sS'summary_score'
p29
I0
sS'results'
p30
(dp31
S'unapply'
p32
(dp33
S'score'
p34
I0
sS'raw_score'
p35
I0
sS'display'
p36
S'0/26'
p37
ssS'apply'
p38
(dp39
g34
I0
sg35
I0
sg36
S'0/41'
p40
ssS'change'
p41
(dp42
g34
I0
sg35
I0
sg36
S'0/17'
p43
ssS'query'
p44
(dp45
g34
I0
sg35
I0
sg36
S'0/65'
p46
ssssVFennec 1.0
p47
(dp48
S'summary_display'
p49
S'117/149'
p50
sS'total_runs'
p51
L6L
sS'summary_score'
p52
I79
sS'results'
p53
(dp54
S'unapply'
p55
(dp56
S'score'
p57
I65
sS'raw_score'
p58
I0
sS'display'
p59
S'17/26'
p60
ssS'apply'
p61
(dp62
g57
I83
sg58
I0
sg59
S'34/41'
p63
ssS'change'
p64
(dp65
g57
I41
sg58
I0
sg59
S'7/17'
p66
ssS'query'
p67
(dp68
g57
I91
sg58
I0
sg59
S'59/65'
p69
ssssVPlayStation 3
p70
(dp71
S'summary_display'
p72
S'0/149'
p73
sS'total_runs'
p74
L3L
sS'summary_score'
p75
I0
sS'results'
p76
(dp77
S'unapply'
p78
(dp79
S'score'
p80
I0
sS'raw_score'
p81
I0
sS'display'
p82
S'0/26'
p83
ssS'apply'
p84
(dp85
g80
I0
sg81
I0
sg82
S'0/41'
p86
ssS'change'
p87
(dp88
g80
I0
sg81
I0
sg82
S'0/17'
p89
ssS'query'
p90
(dp91
g80
I0
sg81
I0
sg82
S'0/65'
p92
ssssVMaxthon 0
p93
(dp94
S'summary_display'
p95
S'99/149'
p96
sS'total_runs'
p97
L1L
sS'summary_score'
p98
I66
sS'results'
p99
(dp100
S'unapply'
p101
(dp102
S'score'
p103
I54
sS'raw_score'
p104
I0
sS'display'
p105
S'14/26'
p106
ssS'apply'
p107
(dp108
g103
I59
sg104
I0
sg105
S'24/41'
p109
ssS'change'
p110
(dp111
g103
I29
sg104
I0
sg105
S'5/17'
p112
ssS'query'
p113
(dp114
g103
I86
sg104
I0
sg105
S'56/65'
p115
ssssVFirefox (Shiretoko) 3.5
p116
(dp117
S'summary_display'
p118
S'117/149'
p119
sS'total_runs'
p120
L176L
sS'summary_score'
p121
I79
sS'results'
p122
(dp123
S'unapply'
p124
(dp125
S'score'
p126
I65
sS'raw_score'
p127
I0
sS'display'
p128
S'17/26'
p129
ssS'apply'
p130
(dp131
g126
I83
sg127
I0
sg128
S'34/41'
p132
ssS'change'
p133
(dp134
g126
I41
sg127
I0
sg128
S'7/17'
p135
ssS'query'
p136
(dp137
g126
I91
sg127
I0
sg128
S'59/65'
p138
ssssVOpera 9.5
p139
(dp140
S'summary_display'
p141
S'0/149'
p142
sS'total_runs'
p143
L1L
sS'summary_score'
p144
I0
sS'results'
p145
(dp146
S'unapply'
p147
(dp148
S'score'
p149
I0
sS'raw_score'
p150
I0
sS'display'
p151
S'0/26'
p152
ssS'apply'
p153
(dp154
g149
I0
sg150
I0
sg151
S'0/41'
p155
ssS'change'
p156
(dp157
g149
I0
sg150
I0
sg151
S'0/17'
p158
ssS'query'
p159
(dp160
g149
I0
sg150
I0
sg151
S'0/65'
p161
ssssVIron 4.0
p162
(dp163
S'summary_display'
p164
S'129/149'
p165
sS'total_runs'
p166
L11L
sS'summary_score'
p167
I87
sS'results'
p168
(dp169
S'unapply'
p170
(dp171
S'score'
p172
I88
sS'raw_score'
p173
I0
sS'display'
p174
S'23/26'
p175
ssS'apply'
p176
(dp177
g172
I95
sg173
I0
sg174
S'39/41'
p178
ssS'change'
p179
(dp180
g172
I100
sg173
I0
sg174
S'17/17'
p181
ssS'query'
p182
(dp183
g172
I77
sg173
I0
sg174
S'50/65'
p184
ssssVOpera 9.62
p185
(dp186
S'summary_display'
p187
S'88/149'
p188
sS'total_runs'
p189
L1L
sS'summary_score'
p190
I59
sS'results'
p191
(dp192
S'unapply'
p193
(dp194
S'score'
p195
I50
sS'raw_score'
p196
I0
sS'display'
p197
S'13/26'
p198
ssS'apply'
p199
(dp200
g195
I63
sg196
I0
sg197
S'26/41'
p201
ssS'change'
p202
(dp203
g195
I41
sg196
I0
sg197
S'7/17'
p204
ssS'query'
p205
(dp206
g195
I65
sg196
I0
sg197
S'42/65'
p207
ssssVOpera 9.63
p208
(dp209
S'summary_display'
p210
S'88/149'
p211
sS'total_runs'
p212
L8L
sS'summary_score'
p213
I59
sS'results'
p214
(dp215
S'unapply'
p216
(dp217
S'score'
p218
I50
sS'raw_score'
p219
I0
sS'display'
p220
S'13/26'
p221
ssS'apply'
p222
(dp223
g218
I63
sg219
I0
sg220
S'26/41'
p224
ssS'change'
p225
(dp226
g218
I41
sg219
I0
sg220
S'7/17'
p227
ssS'query'
p228
(dp229
g218
I65
sg219
I0
sg220
S'42/65'
p230
ssssVFluid 0.9
p231
(dp232
S'summary_display'
p233
S'125/149'
p234
sS'total_runs'
p235
L1L
sS'summary_score'
p236
I84
sS'results'
p237
(dp238
S'unapply'
p239
(dp240
S'score'
p241
I73
sS'raw_score'
p242
I0
sS'display'
p243
S'19/26'
p244
ssS'apply'
p245
(dp246
g241
I95
sg242
I0
sg243
S'39/41'
p247
ssS'change'
p248
(dp249
g241
I100
sg242
I0
sg243
S'17/17'
p250
ssS'query'
p251
(dp252
g241
I77
sg242
I0
sg243
S'50/65'
p253
ssssVOpera 9.61
p254
(dp255
S'summary_display'
p256
S'128/149'
p257
sS'total_runs'
p258
L2L
sS'summary_score'
p259
I86
sS'results'
p260
(dp261
S'unapply'
p262
(dp263
S'score'
p264
I77
sS'raw_score'
p265
I0
sS'display'
p266
S'20/26'
p267
ssS'apply'
p268
(dp269
g264
I85
sg265
I0
sg266
S'35/41'
p270
ssS'change'
p271
(dp272
g264
I76
sg265
I0
sg266
S'13/17'
p273
ssS'query'
p274
(dp275
g264
I92
sg265
I0
sg266
S'60/65'
p276
ssssVOpera 9.64
p277
(dp278
S'summary_display'
p279
S'88/149'
p280
sS'total_runs'
p281
L30L
sS'summary_score'
p282
I59
sS'results'
p283
(dp284
S'unapply'
p285
(dp286
S'score'
p287
I50
sS'raw_score'
p288
I0
sS'display'
p289
S'13/26'
p290
ssS'apply'
p291
(dp292
g287
I63
sg288
I0
sg289
S'26/41'
p293
ssS'change'
p294
(dp295
g287
I41
sg288
I0
sg289
S'7/17'
p296
ssS'query'
p297
(dp298
g287
I65
sg288
I0
sg289
S'42/65'
p299
ssssViPhone 3.0
p300
(dp301
S'summary_display'
p302
S'118/149'
p303
sS'total_runs'
p304
L28L
sS'summary_score'
p305
I79
sS'results'
p306
(dp307
S'unapply'
p308
(dp309
S'score'
p310
I65
sS'raw_score'
p311
I0
sS'display'
p312
S'17/26'
p313
ssS'apply'
p314
(dp315
g310
I83
sg311
I0
sg312
S'34/41'
p316
ssS'change'
p317
(dp318
g310
I100
sg311
I0
sg312
S'17/17'
p319
ssS'query'
p320
(dp321
g310
I77
sg311
I0
sg312
S'50/65'
p322
ssssViPhone 3.1
p323
(dp324
S'summary_display'
p325
S'118/149'
p326
sS'total_runs'
p327
L80L
sS'summary_score'
p328
I79
sS'results'
p329
(dp330
S'unapply'
p331
(dp332
S'score'
p333
I65
sS'raw_score'
p334
I0
sS'display'
p335
S'17/26'
p336
ssS'apply'
p337
(dp338
g333
I83
sg334
I0
sg335
S'34/41'
p339
ssS'change'
p340
(dp341
g333
I100
sg334
I0
sg335
S'17/17'
p342
ssS'query'
p343
(dp344
g333
I77
sg334
I0
sg335
S'50/65'
p345
ssssVNokia 97
p346
(dp347
S'summary_display'
p348
S'100/149'
p349
sS'total_runs'
p350
L2L
sS'summary_score'
p351
I67
sS'results'
p352
(dp353
S'unapply'
p354
(dp355
S'score'
p356
I31
sS'raw_score'
p357
I0
sS'display'
p358
S'8/26'
p359
ssS'apply'
p360
(dp361
g356
I68
sg357
I0
sg358
S'28/41'
p362
ssS'change'
p363
(dp364
g356
I100
sg357
I0
sg358
S'17/17'
p365
ssS'query'
p366
(dp367
g356
I72
sg357
I0
sg358
S'47/65'
p368
ssssVIceweasel 3.5
p369
(dp370
S'summary_display'
p371
S'117/149'
p372
sS'total_runs'
p373
L17L
sS'summary_score'
p374
I79
sS'results'
p375
(dp376
S'unapply'
p377
(dp378
S'score'
p379
I65
sS'raw_score'
p380
I0
sS'display'
p381
S'17/26'
p382
ssS'apply'
p383
(dp384
g379
I83
sg380
I0
sg381
S'34/41'
p385
ssS'change'
p386
(dp387
g379
I41
sg380
I0
sg381
S'7/17'
p388
ssS'query'
p389
(dp390
g379
I91
sg380
I0
sg381
S'59/65'
p391
ssssVCamino 2.1
p392
(dp393
S'summary_display'
p394
S'112/149'
p395
sS'total_runs'
p396
L1L
sS'summary_score'
p397
I75
sS'results'
p398
(dp399
S'unapply'
p400
(dp401
S'score'
p402
I65
sS'raw_score'
p403
I0
sS'display'
p404
S'17/26'
p405
ssS'apply'
p406
(dp407
g402
I83
sg403
I0
sg404
S'34/41'
p408
ssS'change'
p409
(dp410
g402
I41
sg403
I0
sg404
S'7/17'
p411
ssS'query'
p412
(dp413
g402
I83
sg403
I0
sg404
S'54/65'
p414
ssssVCamino 2.0
p415
(dp416
S'summary_display'
p417
S'117/149'
p418
sS'total_runs'
p419
L5L
sS'summary_score'
p420
I79
sS'results'
p421
(dp422
S'unapply'
p423
(dp424
S'score'
p425
I65
sS'raw_score'
p426
I0
sS'display'
p427
S'17/26'
p428
ssS'apply'
p429
(dp430
g425
I83
sg426
I0
sg427
S'34/41'
p431
ssS'change'
p432
(dp433
g425
I41
sg426
I0
sg427
S'7/17'
p434
ssS'query'
p435
(dp436
g425
I91
sg426
I0
sg427
S'59/65'
p437
ssssVIceweasel 3.0
p438
(dp439
S'summary_display'
p440
S'117/149'
p441
sS'total_runs'
p442
L37L
sS'summary_score'
p443
I79
sS'results'
p444
(dp445
S'unapply'
p446
(dp447
S'score'
p448
I65
sS'raw_score'
p449
I0
sS'display'
p450
S'17/26'
p451
ssS'apply'
p452
(dp453
g448
I83
sg449
I0
sg450
S'34/41'
p454
ssS'change'
p455
(dp456
g448
I41
sg449
I0
sg450
S'7/17'
p457
ssS'query'
p458
(dp459
g448
I91
sg449
I0
sg450
S'59/65'
p460
ssssVFlock 1.2
p461
(dp462
S'summary_display'
p463
S'117/149'
p464
sS'total_runs'
p465
L1L
sS'summary_score'
p466
I79
sS'results'
p467
(dp468
S'unapply'
p469
(dp470
S'score'
p471
I65
sS'raw_score'
p472
I0
sS'display'
p473
S'17/26'
p474
ssS'apply'
p475
(dp476
g471
I83
sg472
I0
sg473
S'34/41'
p477
ssS'change'
p478
(dp479
g471
I41
sg472
I0
sg473
S'7/17'
p480
ssS'query'
p481
(dp482
g471
I91
sg472
I0
sg473
S'59/65'
p483
ssssVJasmine 1.0
p484
(dp485
S'summary_display'
p486
S'145/149'
p487
sS'total_runs'
p488
L2L
sS'summary_score'
p489
I97
sS'results'
p490
(dp491
S'unapply'
p492
(dp493
S'score'
p494
I88
sS'raw_score'
p495
I0
sS'display'
p496
S'23/26'
p497
ssS'apply'
p498
(dp499
g494
I98
sg495
I0
sg496
S'40/41'
p500
ssS'change'
p501
(dp502
g494
I100
sg495
I0
sg496
S'17/17'
p503
ssS'query'
p504
(dp505
g494
I100
sg495
I0
sg496
S'65/65'
p506
ssssVAvant 1
p507
(dp508
S'summary_display'
p509
S'96/149'
p510
sS'total_runs'
p511
L13L
sS'summary_score'
p512
I64
sS'results'
p513
(dp514
S'unapply'
p515
(dp516
S'score'
p517
I42
sS'raw_score'
p518
I0
sS'display'
p519
S'11/26'
p520
ssS'apply'
p521
(dp522
g517
I59
sg518
I0
sg519
S'24/41'
p523
ssS'change'
p524
(dp525
g517
I29
sg518
I0
sg519
S'5/17'
p526
ssS'query'
p527
(dp528
g517
I86
sg518
I0
sg519
S'56/65'
p529
ssssVChrome 3.0
p530
(dp531
S'summary_display'
p532
S'126/149'
p533
sS'total_runs'
p534
L555L
sS'summary_score'
p535
I85
sS'results'
p536
(dp537
S'unapply'
p538
(dp539
S'score'
p540
I77
sS'raw_score'
p541
I0
sS'display'
p542
S'20/26'
p543
ssS'apply'
p544
(dp545
g540
I95
sg541
I0
sg542
S'39/41'
p546
ssS'change'
p547
(dp548
g540
I100
sg541
I0
sg542
S'17/17'
p549
ssS'query'
p550
(dp551
g540
I77
sg541
I0
sg542
S'50/65'
p552
ssssViPhone 2.2
p553
(dp554
S'summary_display'
p555
S'104/149'
p556
sS'total_runs'
p557
L8L
sS'summary_score'
p558
I70
sS'results'
p559
(dp560
S'unapply'
p561
(dp562
S'score'
p563
I35
sS'raw_score'
p564
I0
sS'display'
p565
S'9/26'
p566
ssS'apply'
p567
(dp568
g563
I68
sg564
I0
sg565
S'28/41'
p569
ssS'change'
p570
(dp571
g563
I100
sg564
I0
sg565
S'17/17'
p572
ssS'query'
p573
(dp574
g563
I77
sg564
I0
sg565
S'50/65'
p575
ssssVFirefox 1.5
p576
(dp577
S'summary_display'
p578
S'117/149'
p579
sS'total_runs'
p580
L5L
sS'summary_score'
p581
I79
sS'results'
p582
(dp583
S'unapply'
p584
(dp585
S'score'
p586
I65
sS'raw_score'
p587
I0
sS'display'
p588
S'17/26'
p589
ssS'apply'
p590
(dp591
g586
I83
sg587
I0
sg588
S'34/41'
p592
ssS'change'
p593
(dp594
g586
I41
sg587
I0
sg588
S'7/17'
p595
ssS'query'
p596
(dp597
g586
I91
sg587
I0
sg588
S'59/65'
p598
ssssVChrome 1.0
p599
(dp600
S'summary_display'
p601
S'104/149'
p602
sS'total_runs'
p603
L10L
sS'summary_score'
p604
I70
sS'results'
p605
(dp606
S'unapply'
p607
(dp608
S'score'
p609
I35
sS'raw_score'
p610
I0
sS'display'
p611
S'9/26'
p612
ssS'apply'
p613
(dp614
g609
I68
sg610
I0
sg611
S'28/41'
p615
ssS'change'
p616
(dp617
g609
I100
sg610
I0
sg611
S'17/17'
p618
ssS'query'
p619
(dp620
g609
I77
sg610
I0
sg611
S'50/65'
p621
ssssVNetscape 8.1
p622
(dp623
S'summary_display'
p624
S'118/149'
p625
sS'total_runs'
p626
L1L
sS'summary_score'
p627
I79
sS'results'
p628
(dp629
S'unapply'
p630
(dp631
S'score'
p632
I65
sS'raw_score'
p633
I0
sS'display'
p634
S'17/26'
p635
ssS'apply'
p636
(dp637
g632
I85
sg633
I0
sg634
S'35/41'
p638
ssS'change'
p639
(dp640
g632
I41
sg633
I0
sg634
S'7/17'
p641
ssS'query'
p642
(dp643
g632
I91
sg633
I0
sg634
S'59/65'
p644
ssssVOpera 9.70
p645
(dp646
S'summary_display'
p647
S'88/149'
p648
sS'total_runs'
p649
L1L
sS'summary_score'
p650
I59
sS'results'
p651
(dp652
S'unapply'
p653
(dp654
S'score'
p655
I50
sS'raw_score'
p656
I0
sS'display'
p657
S'13/26'
p658
ssS'apply'
p659
(dp660
g655
I63
sg656
I0
sg657
S'26/41'
p661
ssS'change'
p662
(dp663
g655
I41
sg656
I0
sg657
S'7/17'
p664
ssS'query'
p665
(dp666
g655
I65
sg656
I0
sg657
S'42/65'
p667
ssssVFirefox 3.2
p668
(dp669
S'summary_display'
p670
S'117/149'
p671
sS'total_runs'
p672
L1L
sS'summary_score'
p673
I79
sS'results'
p674
(dp675
S'unapply'
p676
(dp677
S'score'
p678
I65
sS'raw_score'
p679
I0
sS'display'
p680
S'17/26'
p681
ssS'apply'
p682
(dp683
g678
I83
sg679
I0
sg680
S'34/41'
p684
ssS'change'
p685
(dp686
g678
I41
sg679
I0
sg680
S'7/17'
p687
ssS'query'
p688
(dp689
g678
I91
sg679
I0
sg680
S'59/65'
p690
ssssVOpera Mini 5.0
p691
(dp692
S'summary_display'
p693
S'26/149'
p694
sS'total_runs'
p695
L4L
sS'summary_score'
p696
I17
sS'results'
p697
(dp698
S'unapply'
p699
(dp700
S'score'
p701
I100
sS'raw_score'
p702
I0
sS'display'
p703
S'26/26'
p704
ssS'apply'
p705
(dp706
g701
I0
sg702
I0
sg703
S'0/41'
p707
ssS'change'
p708
(dp709
g701
I0
sg702
I0
sg703
S'0/17'
p710
ssS'query'
p711
(dp712
g701
I0
sg702
I0
sg703
S'0/65'
p713
ssssVSeaMonkey 2.1
p714
(dp715
S'summary_display'
p716
S'117/149'
p717
sS'total_runs'
p718
L1L
sS'summary_score'
p719
I79
sS'results'
p720
(dp721
S'unapply'
p722
(dp723
S'score'
p724
I65
sS'raw_score'
p725
I0
sS'display'
p726
S'17/26'
p727
ssS'apply'
p728
(dp729
g724
I83
sg725
I0
sg726
S'34/41'
p730
ssS'change'
p731
(dp732
g724
I41
sg725
I0
sg726
S'7/17'
p733
ssS'query'
p734
(dp735
g724
I91
sg725
I0
sg726
S'59/65'
p736
ssssVSeaMonkey 2.0
p737
(dp738
S'summary_display'
p739
S'117/149'
p740
sS'total_runs'
p741
L26L
sS'summary_score'
p742
I79
sS'results'
p743
(dp744
S'unapply'
p745
(dp746
S'score'
p747
I65
sS'raw_score'
p748
I0
sS'display'
p749
S'17/26'
p750
ssS'apply'
p751
(dp752
g747
I83
sg748
I0
sg749
S'34/41'
p753
ssS'change'
p754
(dp755
g747
I41
sg748
I0
sg749
S'7/17'
p756
ssS'query'
p757
(dp758
g747
I91
sg748
I0
sg749
S'59/65'
p759
ssssVFlock 2.0
p760
(dp761
S'summary_display'
p762
S'117/149'
p763
sS'total_runs'
p764
L3L
sS'summary_score'
p765
I79
sS'results'
p766
(dp767
S'unapply'
p768
(dp769
S'score'
p770
I65
sS'raw_score'
p771
I0
sS'display'
p772
S'17/26'
p773
ssS'apply'
p774
(dp775
g770
I83
sg771
I0
sg772
S'34/41'
p776
ssS'change'
p777
(dp778
g770
I41
sg771
I0
sg772
S'7/17'
p779
ssS'query'
p780
(dp781
g770
I91
sg771
I0
sg772
S'59/65'
p782
ssssVFlock 2.5
p783
(dp784
S'summary_display'
p785
S'117/149'
p786
sS'total_runs'
p787
L3L
sS'summary_score'
p788
I79
sS'results'
p789
(dp790
S'unapply'
p791
(dp792
S'score'
p793
I65
sS'raw_score'
p794
I0
sS'display'
p795
S'17/26'
p796
ssS'apply'
p797
(dp798
g793
I83
sg794
I0
sg795
S'34/41'
p799
ssS'change'
p800
(dp801
g793
I41
sg794
I0
sg795
S'7/17'
p802
ssS'query'
p803
(dp804
g793
I91
sg794
I0
sg795
S'59/65'
p805
ssssVIron 2.0
p806
(dp807
S'summary_display'
p808
S'125/149'
p809
sS'total_runs'
p810
L8L
sS'summary_score'
p811
I84
sS'results'
p812
(dp813
S'unapply'
p814
(dp815
S'score'
p816
I73
sS'raw_score'
p817
I0
sS'display'
p818
S'19/26'
p819
ssS'apply'
p820
(dp821
g816
I95
sg817
I0
sg818
S'39/41'
p822
ssS'change'
p823
(dp824
g816
I100
sg817
I0
sg818
S'17/17'
p825
ssS'query'
p826
(dp827
g816
I77
sg817
I0
sg818
S'50/65'
p828
ssssVIE 6.0
p829
(dp830
S'summary_display'
p831
S'99/149'
p832
sS'total_runs'
p833
L104L
sS'summary_score'
p834
I66
sS'results'
p835
(dp836
S'unapply'
p837
(dp838
S'score'
p839
I54
sS'raw_score'
p840
I0
sS'display'
p841
S'14/26'
p842
ssS'apply'
p843
(dp844
g839
I59
sg840
I0
sg841
S'24/41'
p845
ssS'change'
p846
(dp847
g839
I29
sg840
I0
sg841
S'5/17'
p848
ssS'query'
p849
(dp850
g839
I86
sg840
I0
sg841
S'56/65'
p851
ssssVLunascape 4.9
p852
(dp853
S'summary_display'
p854
S'111/149'
p855
sS'total_runs'
p856
L1L
sS'summary_score'
p857
I74
sS'results'
p858
(dp859
S'unapply'
p860
(dp861
S'score'
p862
I35
sS'raw_score'
p863
I0
sS'display'
p864
S'9/26'
p865
ssS'apply'
p866
(dp867
g862
I85
sg863
I0
sg864
S'35/41'
p868
ssS'change'
p869
(dp870
g862
I100
sg863
I0
sg864
S'17/17'
p871
ssS'query'
p872
(dp873
g862
I77
sg863
I0
sg864
S'50/65'
p874
ssssVSeaMonkey 1.1
p875
(dp876
S'summary_display'
p877
S'117/149'
p878
sS'total_runs'
p879
L9L
sS'summary_score'
p880
I79
sS'results'
p881
(dp882
S'unapply'
p883
(dp884
S'score'
p885
I65
sS'raw_score'
p886
I0
sS'display'
p887
S'17/26'
p888
ssS'apply'
p889
(dp890
g885
I83
sg886
I0
sg887
S'34/41'
p891
ssS'change'
p892
(dp893
g885
I41
sg886
I0
sg887
S'7/17'
p894
ssS'query'
p895
(dp896
g885
I91
sg886
I0
sg887
S'59/65'
p897
ssssVOpera Mini 4.2
p898
(dp899
S'summary_display'
p900
S'26/149'
p901
sS'total_runs'
p902
L18L
sS'summary_score'
p903
I17
sS'results'
p904
(dp905
S'unapply'
p906
(dp907
S'score'
p908
I100
sS'raw_score'
p909
I0
sS'display'
p910
S'26/26'
p911
ssS'apply'
p912
(dp913
g908
I0
sg909
I0
sg910
S'0/41'
p914
ssS'change'
p915
(dp916
g908
I0
sg909
I0
sg910
S'0/17'
p917
ssS'query'
p918
(dp919
g908
I0
sg909
I0
sg910
S'0/65'
p920
ssssVOpera Mini 4.0
p921
(dp922
S'summary_display'
p923
S'0/149'
p924
sS'total_runs'
p925
L3L
sS'summary_score'
p926
I0
sS'results'
p927
(dp928
S'unapply'
p929
(dp930
S'score'
p931
I0
sS'raw_score'
p932
I0
sS'display'
p933
S'0/26'
p934
ssS'apply'
p935
(dp936
g931
I0
sg932
I0
sg933
S'0/41'
p937
ssS'change'
p938
(dp939
g931
I0
sg932
I0
sg933
S'0/17'
p940
ssS'query'
p941
(dp942
g931
I0
sg932
I0
sg933
S'0/65'
p943
ssssVKonqueror 4.3
p944
(dp945
S'summary_display'
p946
S'0/149'
p947
sS'total_runs'
p948
L25L
sS'summary_score'
p949
I0
sS'results'
p950
(dp951
S'unapply'
p952
(dp953
S'score'
p954
I0
sS'raw_score'
p955
I0
sS'display'
p956
S'0/26'
p957
ssS'apply'
p958
(dp959
g954
I0
sg955
I0
sg956
S'0/41'
p960
ssS'change'
p961
(dp962
g954
I0
sg955
I0
sg956
S'0/17'
p963
ssS'query'
p964
(dp965
g954
I0
sg955
I0
sg956
S'0/65'
p966
ssssVKonqueror 4.2
p967
(dp968
S'summary_display'
p969
S'0/149'
p970
sS'total_runs'
p971
L2L
sS'summary_score'
p972
I0
sS'results'
p973
(dp974
S'unapply'
p975
(dp976
S'score'
p977
I0
sS'raw_score'
p978
I0
sS'display'
p979
S'0/26'
p980
ssS'apply'
p981
(dp982
g977
I0
sg978
I0
sg979
S'0/41'
p983
ssS'change'
p984
(dp985
g977
I0
sg978
I0
sg979
S'0/17'
p986
ssS'query'
p987
(dp988
g977
I0
sg978
I0
sg979
S'0/65'
p989
ssssVFirefox (Namoroka) 3.6
p990
(dp991
S'summary_display'
p992
S'117/149'
p993
sS'total_runs'
p994
L47L
sS'summary_score'
p995
I79
sS'results'
p996
(dp997
S'unapply'
p998
(dp999
S'score'
p1000
I65
sS'raw_score'
p1001
I0
sS'display'
p1002
S'17/26'
p1003
ssS'apply'
p1004
(dp1005
g1000
I83
sg1001
I0
sg1002
S'34/41'
p1006
ssS'change'
p1007
(dp1008
g1000
I41
sg1001
I0
sg1002
S'7/17'
p1009
ssS'query'
p1010
(dp1011
g1000
I91
sg1001
I0
sg1002
S'59/65'
p1012
ssssVIE 4.0
p1013
(dp1014
S'summary_display'
p1015
S'64/149'
p1016
sS'total_runs'
p1017
L1L
sS'summary_score'
p1018
I43
sS'results'
p1019
(dp1020
S'unapply'
p1021
(dp1022
S'score'
p1023
I4
sS'raw_score'
p1024
I0
sS'display'
p1025
S'1/26'
p1026
ssS'apply'
p1027
(dp1028
g1023
I71
sg1024
I0
sg1025
S'29/41'
p1029
ssS'change'
p1030
(dp1031
g1023
I0
sg1024
I0
sg1025
S'0/17'
p1032
ssS'query'
p1033
(dp1034
g1023
I52
sg1024
I0
sg1025
S'34/65'
p1035
ssssVKazehakase 0.5
p1036
(dp1037
S'summary_display'
p1038
S'117/149'
p1039
sS'total_runs'
p1040
L1L
sS'summary_score'
p1041
I79
sS'results'
p1042
(dp1043
S'unapply'
p1044
(dp1045
S'score'
p1046
I65
sS'raw_score'
p1047
I0
sS'display'
p1048
S'17/26'
p1049
ssS'apply'
p1050
(dp1051
g1046
I83
sg1047
I0
sg1048
S'34/41'
p1052
ssS'change'
p1053
(dp1054
g1046
I41
sg1047
I0
sg1048
S'7/17'
p1055
ssS'query'
p1056
(dp1057
g1046
I91
sg1047
I0
sg1048
S'59/65'
p1058
ssssVLunascape 6.0
p1059
(dp1060
S'summary_display'
p1061
S'117/149'
p1062
sS'total_runs'
p1063
L11L
sS'summary_score'
p1064
I79
sS'results'
p1065
(dp1066
S'unapply'
p1067
(dp1068
S'score'
p1069
I65
sS'raw_score'
p1070
I0
sS'display'
p1071
S'17/26'
p1072
ssS'apply'
p1073
(dp1074
g1069
I83
sg1070
I0
sg1071
S'34/41'
p1075
ssS'change'
p1076
(dp1077
g1069
I41
sg1070
I0
sg1071
S'7/17'
p1078
ssS'query'
p1079
(dp1080
g1069
I91
sg1070
I0
sg1071
S'59/65'
p1081
ssssVOpera 10.01
p1082
(dp1083
S'summary_display'
p1084
S'89/149'
p1085
sS'total_runs'
p1086
L112L
sS'summary_score'
p1087
I60
sS'results'
p1088
(dp1089
S'unapply'
p1090
(dp1091
S'score'
p1092
I50
sS'raw_score'
p1093
I0
sS'display'
p1094
S'13/26'
p1095
ssS'apply'
p1096
(dp1097
g1092
I63
sg1093
I0
sg1094
S'26/41'
p1098
ssS'change'
p1099
(dp1100
g1092
I41
sg1093
I0
sg1094
S'7/17'
p1101
ssS'query'
p1102
(dp1103
g1092
I66
sg1093
I0
sg1094
S'43/65'
p1104
ssssVOpera 10.00
p1105
(dp1106
S'summary_display'
p1107
S'89/149'
p1108
sS'total_runs'
p1109
L565L
sS'summary_score'
p1110
I60
sS'results'
p1111
(dp1112
S'unapply'
p1113
(dp1114
S'score'
p1115
I50
sS'raw_score'
p1116
I0
sS'display'
p1117
S'13/26'
p1118
ssS'apply'
p1119
(dp1120
g1115
I63
sg1116
I0
sg1117
S'26/41'
p1121
ssS'change'
p1122
(dp1123
g1115
I41
sg1116
I0
sg1117
S'7/17'
p1124
ssS'query'
p1125
(dp1126
g1115
I66
sg1116
I0
sg1117
S'43/65'
p1127
ssssVSwiftfox 3.5
p1128
(dp1129
S'summary_display'
p1130
S'117/149'
p1131
sS'total_runs'
p1132
L9L
sS'summary_score'
p1133
I79
sS'results'
p1134
(dp1135
S'unapply'
p1136
(dp1137
S'score'
p1138
I65
sS'raw_score'
p1139
I0
sS'display'
p1140
S'17/26'
p1141
ssS'apply'
p1142
(dp1143
g1138
I83
sg1139
I0
sg1140
S'34/41'
p1144
ssS'change'
p1145
(dp1146
g1138
I41
sg1139
I0
sg1140
S'7/17'
p1147
ssS'query'
p1148
(dp1149
g1138
I91
sg1139
I0
sg1140
S'59/65'
p1150
ssssVChrome 5.0
p1151
(dp1152
S'summary_display'
p1153
S'129/149'
p1154
sS'total_runs'
p1155
L2L
sS'summary_score'
p1156
I87
sS'results'
p1157
(dp1158
S'unapply'
p1159
(dp1160
S'score'
p1161
I88
sS'raw_score'
p1162
I0
sS'display'
p1163
S'23/26'
p1164
ssS'apply'
p1165
(dp1166
g1161
I95
sg1162
I0
sg1163
S'39/41'
p1167
ssS'change'
p1168
(dp1169
g1161
I100
sg1162
I0
sg1163
S'17/17'
p1170
ssS'query'
p1171
(dp1172
g1161
I77
sg1162
I0
sg1163
S'50/65'
p1173
ssssVFirefox 3.1
p1174
(dp1175
S'summary_display'
p1176
S'117/149'
p1177
sS'total_runs'
p1178
L1L
sS'summary_score'
p1179
I79
sS'results'
p1180
(dp1181
S'unapply'
p1182
(dp1183
S'score'
p1184
I65
sS'raw_score'
p1185
I0
sS'display'
p1186
S'17/26'
p1187
ssS'apply'
p1188
(dp1189
g1184
I83
sg1185
I0
sg1186
S'34/41'
p1190
ssS'change'
p1191
(dp1192
g1184
I41
sg1185
I0
sg1186
S'7/17'
p1193
ssS'query'
p1194
(dp1195
g1184
I91
sg1185
I0
sg1186
S'59/65'
p1196
ssssVFirefox 3.0
p1197
(dp1198
S'summary_display'
p1199
S'117/149'
p1200
sS'total_runs'
p1201
L582L
sS'summary_score'
p1202
I79
sS'results'
p1203
(dp1204
S'unapply'
p1205
(dp1206
S'score'
p1207
I65
sS'raw_score'
p1208
I0
sS'display'
p1209
S'17/26'
p1210
ssS'apply'
p1211
(dp1212
g1207
I83
sg1208
I0
sg1209
S'34/41'
p1213
ssS'change'
p1214
(dp1215
g1207
I41
sg1208
I0
sg1209
S'7/17'
p1216
ssS'query'
p1217
(dp1218
g1207
I91
sg1208
I0
sg1209
S'59/65'
p1219
ssssVAndroid 2.0
p1220
(dp1221
S'summary_display'
p1222
S'129/149'
p1223
sS'total_runs'
p1224
L11L
sS'summary_score'
p1225
I87
sS'results'
p1226
(dp1227
S'unapply'
p1228
(dp1229
S'score'
p1230
I88
sS'raw_score'
p1231
I0
sS'display'
p1232
S'23/26'
p1233
ssS'apply'
p1234
(dp1235
g1230
I95
sg1231
I0
sg1232
S'39/41'
p1236
ssS'change'
p1237
(dp1238
g1230
I100
sg1231
I0
sg1232
S'17/17'
p1239
ssS'query'
p1240
(dp1241
g1230
I77
sg1231
I0
sg1232
S'50/65'
p1242
ssssVAndroid 2.1
p1243
(dp1244
S'summary_display'
p1245
S'126/146'
p1246
sS'total_runs'
p1247
L1L
sS'summary_score'
p1248
I86
sS'results'
p1249
(dp1250
S'unapply'
p1251
(dp1252
S'score'
p1253
I88
sS'raw_score'
p1254
I0
sS'display'
p1255
S'22/25'
p1256
ssS'apply'
p1257
(dp1258
g1253
I95
sg1254
I0
sg1255
S'37/39'
p1259
ssS'change'
p1260
(dp1261
g1253
I100
sg1254
I0
sg1255
S'17/17'
p1262
ssS'query'
p1263
(dp1264
g1253
I77
sg1254
I0
sg1255
S'50/65'
p1265
ssssVFirefox 3.5
p1266
(dp1267
S'summary_display'
p1268
S'117/149'
p1269
sS'total_runs'
p1270
L3799L
sS'summary_score'
p1271
I79
sS'results'
p1272
(dp1273
S'unapply'
p1274
(dp1275
S'score'
p1276
I65
sS'raw_score'
p1277
I0
sS'display'
p1278
S'17/26'
p1279
ssS'apply'
p1280
(dp1281
g1276
I83
sg1277
I0
sg1278
S'34/41'
p1282
ssS'change'
p1283
(dp1284
g1276
I41
sg1277
I0
sg1278
S'7/17'
p1285
ssS'query'
p1286
(dp1287
g1276
I91
sg1277
I0
sg1278
S'59/65'
p1288
ssssVFirefox 3.7
p1289
(dp1290
S'summary_display'
p1291
S'117/149'
p1292
sS'total_runs'
p1293
L3L
sS'summary_score'
p1294
I79
sS'results'
p1295
(dp1296
S'unapply'
p1297
(dp1298
S'score'
p1299
I65
sS'raw_score'
p1300
I0
sS'display'
p1301
S'17/26'
p1302
ssS'apply'
p1303
(dp1304
g1299
I83
sg1300
I0
sg1301
S'34/41'
p1305
ssS'change'
p1306
(dp1307
g1299
I41
sg1300
I0
sg1301
S'7/17'
p1308
ssS'query'
p1309
(dp1310
g1299
I91
sg1300
I0
sg1301
S'59/65'
p1311
ssssVFirefox 3.6
p1312
(dp1313
S'summary_display'
p1314
S'117/149'
p1315
sS'total_runs'
p1316
L225L
sS'summary_score'
p1317
I79
sS'results'
p1318
(dp1319
S'unapply'
p1320
(dp1321
S'score'
p1322
I65
sS'raw_score'
p1323
I0
sS'display'
p1324
S'17/26'
p1325
ssS'apply'
p1326
(dp1327
g1322
I83
sg1323
I0
sg1324
S'34/41'
p1328
ssS'change'
p1329
(dp1330
g1322
I41
sg1323
I0
sg1324
S'7/17'
p1331
ssS'query'
p1332
(dp1333
g1322
I91
sg1323
I0
sg1324
S'59/65'
p1334
ssssVFirefox 2.0
p1335
(dp1336
S'summary_display'
p1337
S'117/149'
p1338
sS'total_runs'
p1339
L50L
sS'summary_score'
p1340
I79
sS'results'
p1341
(dp1342
S'unapply'
p1343
(dp1344
S'score'
p1345
I65
sS'raw_score'
p1346
I0
sS'display'
p1347
S'17/26'
p1348
ssS'apply'
p1349
(dp1350
g1345
I83
sg1346
I0
sg1347
S'34/41'
p1351
ssS'change'
p1352
(dp1353
g1345
I41
sg1346
I0
sg1347
S'7/17'
p1354
ssS'query'
p1355
(dp1356
g1345
I91
sg1346
I0
sg1347
S'59/65'
p1357
ssssVAndroid 1.5
p1358
(dp1359
S'summary_display'
p1360
S'118/149'
p1361
sS'total_runs'
p1362
L25L
sS'summary_score'
p1363
I79
sS'results'
p1364
(dp1365
S'unapply'
p1366
(dp1367
S'score'
p1368
I65
sS'raw_score'
p1369
I0
sS'display'
p1370
S'17/26'
p1371
ssS'apply'
p1372
(dp1373
g1368
I83
sg1369
I0
sg1370
S'34/41'
p1374
ssS'change'
p1375
(dp1376
g1368
I100
sg1369
I0
sg1370
S'17/17'
p1377
ssS'query'
p1378
(dp1379
g1368
I77
sg1369
I0
sg1370
S'50/65'
p1380
ssssVChrome 4.0
p1381
(dp1382
S'summary_display'
p1383
S'129/149'
p1384
sS'total_runs'
p1385
L1045L
sS'summary_score'
p1386
I87
sS'results'
p1387
(dp1388
S'unapply'
p1389
(dp1390
S'score'
p1391
I88
sS'raw_score'
p1392
I0
sS'display'
p1393
S'23/26'
p1394
ssS'apply'
p1395
(dp1396
g1391
I95
sg1392
I0
sg1393
S'39/41'
p1397
ssS'change'
p1398
(dp1399
g1391
I100
sg1392
I0
sg1393
S'17/17'
p1400
ssS'query'
p1401
(dp1402
g1391
I77
sg1392
I0
sg1393
S'50/65'
p1403
ssssVAndroid 1.6
p1404
(dp1405
S'summary_display'
p1406
S'118/149'
p1407
sS'total_runs'
p1408
L12L
sS'summary_score'
p1409
I79
sS'results'
p1410
(dp1411
S'unapply'
p1412
(dp1413
S'score'
p1414
I65
sS'raw_score'
p1415
I0
sS'display'
p1416
S'17/26'
p1417
ssS'apply'
p1418
(dp1419
g1414
I83
sg1415
I0
sg1416
S'34/41'
p1420
ssS'change'
p1421
(dp1422
g1414
I100
sg1415
I0
sg1416
S'17/17'
p1423
ssS'query'
p1424
(dp1425
g1414
I77
sg1415
I0
sg1416
S'50/65'
p1426
ssssVAndroid 1.1
p1427
(dp1428
S'summary_display'
p1429
S'103/149'
p1430
sS'total_runs'
p1431
L2L
sS'summary_score'
p1432
I69
sS'results'
p1433
(dp1434
S'unapply'
p1435
(dp1436
S'score'
p1437
I35
sS'raw_score'
p1438
I0
sS'display'
p1439
S'9/26'
p1440
ssS'apply'
p1441
(dp1442
g1437
I68
sg1438
I0
sg1439
S'28/41'
p1443
ssS'change'
p1444
(dp1445
g1437
I94
sg1438
I0
sg1439
S'16/17'
p1446
ssS'query'
p1447
(dp1448
g1437
I77
sg1438
I0
sg1439
S'50/65'
p1449
ssssVOpera 9.60
p1450
(dp1451
S'summary_display'
p1452
S'88/149'
p1453
sS'total_runs'
p1454
L1L
sS'summary_score'
p1455
I59
sS'results'
p1456
(dp1457
S'unapply'
p1458
(dp1459
S'score'
p1460
I50
sS'raw_score'
p1461
I0
sS'display'
p1462
S'13/26'
p1463
ssS'apply'
p1464
(dp1465
g1460
I63
sg1461
I0
sg1462
S'26/41'
p1466
ssS'change'
p1467
(dp1468
g1460
I41
sg1461
I0
sg1462
S'7/17'
p1469
ssS'query'
p1470
(dp1471
g1460
I65
sg1461
I0
sg1462
S'42/65'
p1472
ssssVOpera 9.52
p1473
(dp1474
S'summary_display'
p1475
S'88/149'
p1476
sS'total_runs'
p1477
L1L
sS'summary_score'
p1478
I59
sS'results'
p1479
(dp1480
S'unapply'
p1481
(dp1482
S'score'
p1483
I50
sS'raw_score'
p1484
I0
sS'display'
p1485
S'13/26'
p1486
ssS'apply'
p1487
(dp1488
g1483
I63
sg1484
I0
sg1485
S'26/41'
p1489
ssS'change'
p1490
(dp1491
g1483
I41
sg1484
I0
sg1485
S'7/17'
p1492
ssS'query'
p1493
(dp1494
g1483
I65
sg1484
I0
sg1485
S'42/65'
p1495
ssssVOpera 9.51
p1496
(dp1497
S'summary_display'
p1498
S'88/149'
p1499
sS'total_runs'
p1500
L3L
sS'summary_score'
p1501
I59
sS'results'
p1502
(dp1503
S'unapply'
p1504
(dp1505
S'score'
p1506
I50
sS'raw_score'
p1507
I0
sS'display'
p1508
S'13/26'
p1509
ssS'apply'
p1510
(dp1511
g1506
I63
sg1507
I0
sg1508
S'26/41'
p1512
ssS'change'
p1513
(dp1514
g1506
I41
sg1507
I0
sg1508
S'7/17'
p1515
ssS'query'
p1516
(dp1517
g1506
I65
sg1507
I0
sg1508
S'42/65'
p1518
ssssVOpera 9.50
p1519
(dp1520
S'summary_display'
p1521
S'0/149'
p1522
sS'total_runs'
p1523
L1L
sS'summary_score'
p1524
I0
sS'results'
p1525
(dp1526
S'unapply'
p1527
(dp1528
S'score'
p1529
I0
sS'raw_score'
p1530
I0
sS'display'
p1531
S'0/26'
p1532
ssS'apply'
p1533
(dp1534
g1529
I0
sg1530
I0
sg1531
S'0/41'
p1535
ssS'change'
p1536
(dp1537
g1529
I0
sg1530
I0
sg1531
S'0/17'
p1538
ssS'query'
p1539
(dp1540
g1529
I0
sg1530
I0
sg1531
S'0/65'
p1541
ssssVOpera 10.10
p1542
(dp1543
S'summary_display'
p1544
S'89/149'
p1545
sS'total_runs'
p1546
L406L
sS'summary_score'
p1547
I60
sS'results'
p1548
(dp1549
S'unapply'
p1550
(dp1551
S'score'
p1552
I50
sS'raw_score'
p1553
I0
sS'display'
p1554
S'13/26'
p1555
ssS'apply'
p1556
(dp1557
g1552
I63
sg1553
I0
sg1554
S'26/41'
p1558
ssS'change'
p1559
(dp1560
g1552
I41
sg1553
I0
sg1554
S'7/17'
p1561
ssS'query'
p1562
(dp1563
g1552
I66
sg1553
I0
sg1554
S'43/65'
p1564
ssssVSleipnir 2.8
p1565
(dp1566
S'summary_display'
p1567
S'99/149'
p1568
sS'total_runs'
p1569
L10L
sS'summary_score'
p1570
I66
sS'results'
p1571
(dp1572
S'unapply'
p1573
(dp1574
S'score'
p1575
I54
sS'raw_score'
p1576
I0
sS'display'
p1577
S'14/26'
p1578
ssS'apply'
p1579
(dp1580
g1575
I59
sg1576
I0
sg1577
S'24/41'
p1581
ssS'change'
p1582
(dp1583
g1575
I29
sg1576
I0
sg1577
S'5/17'
p1584
ssS'query'
p1585
(dp1586
g1575
I86
sg1576
I0
sg1577
S'56/65'
p1587
ssssVIE 8.0
p1588
(dp1589
S'summary_display'
p1590
S'99/149'
p1591
sS'total_runs'
p1592
L474L
sS'summary_score'
p1593
I66
sS'results'
p1594
(dp1595
S'unapply'
p1596
(dp1597
S'score'
p1598
I54
sS'raw_score'
p1599
I0
sS'display'
p1600
S'14/26'
p1601
ssS'apply'
p1602
(dp1603
g1598
I59
sg1599
I0
sg1600
S'24/41'
p1604
ssS'change'
p1605
(dp1606
g1598
I29
sg1599
I0
sg1600
S'5/17'
p1607
ssS'query'
p1608
(dp1609
g1598
I86
sg1599
I0
sg1600
S'56/65'
p1610
ssssVSleipnir 2.9
p1611
(dp1612
S'summary_display'
p1613
S'99/149'
p1614
sS'total_runs'
p1615
L5L
sS'summary_score'
p1616
I66
sS'results'
p1617
(dp1618
S'unapply'
p1619
(dp1620
S'score'
p1621
I54
sS'raw_score'
p1622
I0
sS'display'
p1623
S'14/26'
p1624
ssS'apply'
p1625
(dp1626
g1621
I59
sg1622
I0
sg1623
S'24/41'
p1627
ssS'change'
p1628
(dp1629
g1621
I29
sg1622
I0
sg1623
S'5/17'
p1630
ssS'query'
p1631
(dp1632
g1621
I86
sg1622
I0
sg1623
S'56/65'
p1633
ssssVNetNewsWire 3.1
p1634
(dp1635
S'summary_display'
p1636
S'125/149'
p1637
sS'total_runs'
p1638
L1L
sS'summary_score'
p1639
I84
sS'results'
p1640
(dp1641
S'unapply'
p1642
(dp1643
S'score'
p1644
I73
sS'raw_score'
p1645
I0
sS'display'
p1646
S'19/26'
p1647
ssS'apply'
p1648
(dp1649
g1644
I95
sg1645
I0
sg1646
S'39/41'
p1650
ssS'change'
p1651
(dp1652
g1644
I100
sg1645
I0
sg1646
S'17/17'
p1653
ssS'query'
p1654
(dp1655
g1644
I77
sg1645
I0
sg1646
S'50/65'
p1656
ssssVNetscape 7.1
p1657
(dp1658
S'summary_display'
p1659
S'117/149'
p1660
sS'total_runs'
p1661
L1L
sS'summary_score'
p1662
I79
sS'results'
p1663
(dp1664
S'unapply'
p1665
(dp1666
S'score'
p1667
I65
sS'raw_score'
p1668
I0
sS'display'
p1669
S'17/26'
p1670
ssS'apply'
p1671
(dp1672
g1667
I83
sg1668
I0
sg1669
S'34/41'
p1673
ssS'change'
p1674
(dp1675
g1667
I41
sg1668
I0
sg1669
S'7/17'
p1676
ssS'query'
p1677
(dp1678
g1667
I91
sg1668
I0
sg1669
S'59/65'
p1679
ssssVNetNewsWire 3.2
p1680
(dp1681
S'summary_display'
p1682
S'125/149'
p1683
sS'total_runs'
p1684
L9L
sS'summary_score'
p1685
I84
sS'results'
p1686
(dp1687
S'unapply'
p1688
(dp1689
S'score'
p1690
I73
sS'raw_score'
p1691
I0
sS'display'
p1692
S'19/26'
p1693
ssS'apply'
p1694
(dp1695
g1690
I95
sg1691
I0
sg1692
S'39/41'
p1696
ssS'change'
p1697
(dp1698
g1690
I100
sg1691
I0
sg1692
S'17/17'
p1699
ssS'query'
p1700
(dp1701
g1690
I77
sg1691
I0
sg1692
S'50/65'
p1702
ssssVSafari 4.0
p1703
(dp1704
S'summary_display'
p1705
S'125/149'
p1706
sS'total_runs'
p1707
L984L
sS'summary_score'
p1708
I84
sS'results'
p1709
(dp1710
S'unapply'
p1711
(dp1712
S'score'
p1713
I73
sS'raw_score'
p1714
I0
sS'display'
p1715
S'19/26'
p1716
ssS'apply'
p1717
(dp1718
g1713
I95
sg1714
I0
sg1715
S'39/41'
p1719
ssS'change'
p1720
(dp1721
g1713
I100
sg1714
I0
sg1715
S'17/17'
p1722
ssS'query'
p1723
(dp1724
g1713
I77
sg1714
I0
sg1715
S'50/65'
p1725
ssssVKonqueror 3.5
p1726
(dp1727
S'summary_display'
p1728
S'0/149'
p1729
sS'total_runs'
p1730
L10L
sS'summary_score'
p1731
I0
sS'results'
p1732
(dp1733
S'unapply'
p1734
(dp1735
S'score'
p1736
I0
sS'raw_score'
p1737
I0
sS'display'
p1738
S'0/26'
p1739
ssS'apply'
p1740
(dp1741
g1736
I0
sg1737
I0
sg1738
S'0/41'
p1742
ssS'change'
p1743
(dp1744
g1736
I0
sg1737
I0
sg1738
S'0/17'
p1745
ssS'query'
p1746
(dp1747
g1736
I0
sg1737
I0
sg1738
S'0/65'
p1748
ssssVFirefox (Minefield) 3.7
p1749
(dp1750
S'summary_display'
p1751
S'117/149'
p1752
sS'total_runs'
p1753
L174L
sS'summary_score'
p1754
I79
sS'results'
p1755
(dp1756
S'unapply'
p1757
(dp1758
S'score'
p1759
I65
sS'raw_score'
p1760
I0
sS'display'
p1761
S'17/26'
p1762
ssS'apply'
p1763
(dp1764
g1759
I83
sg1760
I0
sg1761
S'34/41'
p1765
ssS'change'
p1766
(dp1767
g1759
I41
sg1760
I0
sg1761
S'7/17'
p1768
ssS'query'
p1769
(dp1770
g1759
I91
sg1760
I0
sg1761
S'59/65'
p1771
ssssVFirefox (Minefield) 3.6
p1772
(dp1773
S'summary_display'
p1774
S'117/149'
p1775
sS'total_runs'
p1776
L8L
sS'summary_score'
p1777
I79
sS'results'
p1778
(dp1779
S'unapply'
p1780
(dp1781
S'score'
p1782
I65
sS'raw_score'
p1783
I0
sS'display'
p1784
S'17/26'
p1785
ssS'apply'
p1786
(dp1787
g1782
I83
sg1783
I0
sg1784
S'34/41'
p1788
ssS'change'
p1789
(dp1790
g1782
I41
sg1783
I0
sg1784
S'7/17'
p1791
ssS'query'
p1792
(dp1793
g1782
I91
sg1783
I0
sg1784
S'59/65'
p1794
ssssVFlock 1.1
p1795
(dp1796
S'summary_display'
p1797
S'117/149'
p1798
sS'total_runs'
p1799
L1L
sS'summary_score'
p1800
I79
sS'results'
p1801
(dp1802
S'unapply'
p1803
(dp1804
S'score'
p1805
I65
sS'raw_score'
p1806
I0
sS'display'
p1807
S'17/26'
p1808
ssS'apply'
p1809
(dp1810
g1805
I83
sg1806
I0
sg1807
S'34/41'
p1811
ssS'change'
p1812
(dp1813
g1805
I41
sg1806
I0
sg1807
S'7/17'
p1814
ssS'query'
p1815
(dp1816
g1805
I91
sg1806
I0
sg1807
S'59/65'
p1817
ssssVGranParadiso 3.0
p1818
(dp1819
S'summary_display'
p1820
S'117/149'
p1821
sS'total_runs'
p1822
L1L
sS'summary_score'
p1823
I79
sS'results'
p1824
(dp1825
S'unapply'
p1826
(dp1827
S'score'
p1828
I65
sS'raw_score'
p1829
I0
sS'display'
p1830
S'17/26'
p1831
ssS'apply'
p1832
(dp1833
g1828
I83
sg1829
I0
sg1830
S'34/41'
p1834
ssS'change'
p1835
(dp1836
g1828
I41
sg1829
I0
sg1830
S'7/17'
p1837
ssS'query'
p1838
(dp1839
g1828
I91
sg1829
I0
sg1830
S'59/65'
p1840
ssssVIron 3.0
p1841
(dp1842
S'summary_display'
p1843
S'129/149'
p1844
sS'total_runs'
p1845
L55L
sS'summary_score'
p1846
I87
sS'results'
p1847
(dp1848
S'unapply'
p1849
(dp1850
S'score'
p1851
I88
sS'raw_score'
p1852
I0
sS'display'
p1853
S'23/26'
p1854
ssS'apply'
p1855
(dp1856
g1851
I95
sg1852
I0
sg1853
S'39/41'
p1857
ssS'change'
p1858
(dp1859
g1851
I100
sg1852
I0
sg1853
S'17/17'
p1860
ssS'query'
p1861
(dp1862
g1851
I77
sg1852
I0
sg1853
S'50/65'
p1863
ssssVLunascape 5.1
p1864
(dp1865
S'summary_display'
p1866
S'117/149'
p1867
sS'total_runs'
p1868
L10L
sS'summary_score'
p1869
I79
sS'results'
p1870
(dp1871
S'unapply'
p1872
(dp1873
S'score'
p1874
I65
sS'raw_score'
p1875
I0
sS'display'
p1876
S'17/26'
p1877
ssS'apply'
p1878
(dp1879
g1874
I83
sg1875
I0
sg1876
S'34/41'
p1880
ssS'change'
p1881
(dp1882
g1874
I41
sg1875
I0
sg1876
S'7/17'
p1883
ssS'query'
p1884
(dp1885
g1874
I91
sg1875
I0
sg1876
S'59/65'
p1886
ssssVOpera 8.54
p1887
(dp1888
S'summary_display'
p1889
S'0/149'
p1890
sS'total_runs'
p1891
L1L
sS'summary_score'
p1892
I0
sS'results'
p1893
(dp1894
S'unapply'
p1895
(dp1896
S'score'
p1897
I0
sS'raw_score'
p1898
I0
sS'display'
p1899
S'0/26'
p1900
ssS'apply'
p1901
(dp1902
g1897
I0
sg1898
I0
sg1899
S'0/41'
p1903
ssS'change'
p1904
(dp1905
g1897
I0
sg1898
I0
sg1899
S'0/17'
p1906
ssS'query'
p1907
(dp1908
g1897
I0
sg1898
I0
sg1899
S'0/65'
p1909
ssssVChrome Frame (IE 8) 4.0
p1910
(dp1911
S'summary_display'
p1912
S'129/149'
p1913
sS'total_runs'
p1914
L12L
sS'summary_score'
p1915
I87
sS'results'
p1916
(dp1917
S'unapply'
p1918
(dp1919
S'score'
p1920
I88
sS'raw_score'
p1921
I0
sS'display'
p1922
S'23/26'
p1923
ssS'apply'
p1924
(dp1925
g1920
I95
sg1921
I0
sg1922
S'39/41'
p1926
ssS'change'
p1927
(dp1928
g1920
I100
sg1921
I0
sg1922
S'17/17'
p1929
ssS'query'
p1930
(dp1931
g1920
I77
sg1921
I0
sg1922
S'50/65'
p1932
ssssVIE 7.0
p1933
(dp1934
S'summary_display'
p1935
S'99/149'
p1936
sS'total_runs'
p1937
L207L
sS'summary_score'
p1938
I66
sS'results'
p1939
(dp1940
S'unapply'
p1941
(dp1942
S'score'
p1943
I54
sS'raw_score'
p1944
I0
sS'display'
p1945
S'14/26'
p1946
ssS'apply'
p1947
(dp1948
g1943
I59
sg1944
I0
sg1945
S'24/41'
p1949
ssS'change'
p1950
(dp1951
g1943
I29
sg1944
I0
sg1945
S'5/17'
p1952
ssS'query'
p1953
(dp1954
g1943
I86
sg1944
I0
sg1945
S'56/65'
p1955
ssssVOpera 10.20
p1956
(dp1957
S'summary_display'
p1958
S'89/149'
p1959
sS'total_runs'
p1960
L37L
sS'summary_score'
p1961
I60
sS'results'
p1962
(dp1963
S'unapply'
p1964
(dp1965
S'score'
p1966
I50
sS'raw_score'
p1967
I0
sS'display'
p1968
S'13/26'
p1969
ssS'apply'
p1970
(dp1971
g1966
I63
sg1967
I0
sg1968
S'26/41'
p1972
ssS'change'
p1973
(dp1974
g1966
I41
sg1967
I0
sg1968
S'7/17'
p1975
ssS'query'
p1976
(dp1977
g1966
I66
sg1967
I0
sg1968
S'43/65'
p1978
ssssVPalm Pre 1.0
p1979
(dp1980
S'summary_display'
p1981
S'129/149'
p1982
sS'total_runs'
p1983
L7L
sS'summary_score'
p1984
I87
sS'results'
p1985
(dp1986
S'unapply'
p1987
(dp1988
S'score'
p1989
I88
sS'raw_score'
p1990
I0
sS'display'
p1991
S'23/26'
p1992
ssS'apply'
p1993
(dp1994
g1989
I95
sg1990
I0
sg1991
S'39/41'
p1995
ssS'change'
p1996
(dp1997
g1989
I100
sg1990
I0
sg1991
S'17/17'
p1998
ssS'query'
p1999
(dp2000
g1989
I77
sg1990
I0
sg1991
S'50/65'
p2001
ssssVStainless 0.7
p2002
(dp2003
S'summary_display'
p2004
S'125/149'
p2005
sS'total_runs'
p2006
L4L
sS'summary_score'
p2007
I84
sS'results'
p2008
(dp2009
S'unapply'
p2010
(dp2011
S'score'
p2012
I73
sS'raw_score'
p2013
I0
sS'display'
p2014
S'19/26'
p2015
ssS'apply'
p2016
(dp2017
g2012
I95
sg2013
I0
sg2014
S'39/41'
p2018
ssS'change'
p2019
(dp2020
g2012
I100
sg2013
I0
sg2014
S'17/17'
p2021
ssS'query'
p2022
(dp2023
g2012
I77
sg2013
I0
sg2014
S'50/65'
p2024
ssssVStainless 0.6
p2025
(dp2026
S'summary_display'
p2027
S'125/149'
p2028
sS'total_runs'
p2029
L1L
sS'summary_score'
p2030
I84
sS'results'
p2031
(dp2032
S'unapply'
p2033
(dp2034
S'score'
p2035
I73
sS'raw_score'
p2036
I0
sS'display'
p2037
S'19/26'
p2038
ssS'apply'
p2039
(dp2040
g2035
I95
sg2036
I0
sg2037
S'39/41'
p2041
ssS'change'
p2042
(dp2043
g2035
I100
sg2036
I0
sg2037
S'17/17'
p2044
ssS'query'
p2045
(dp2046
g2035
I77
sg2036
I0
sg2037
S'50/65'
p2047
ssssVChrome Frame (IE 7) 4.0
p2048
(dp2049
S'summary_display'
p2050
S'129/149'
p2051
sS'total_runs'
p2052
L4L
sS'summary_score'
p2053
I87
sS'results'
p2054
(dp2055
S'unapply'
p2056
(dp2057
S'score'
p2058
I88
sS'raw_score'
p2059
I0
sS'display'
p2060
S'23/26'
p2061
ssS'apply'
p2062
(dp2063
g2058
I95
sg2059
I0
sg2060
S'39/41'
p2064
ssS'change'
p2065
(dp2066
g2058
I100
sg2059
I0
sg2060
S'17/17'
p2067
ssS'query'
p2068
(dp2069
g2058
I77
sg2059
I0
sg2060
S'50/65'
p2070
ssssVK-Meleon 1.5
p2071
(dp2072
S'summary_display'
p2073
S'117/149'
p2074
sS'total_runs'
p2075
L9L
sS'summary_score'
p2076
I79
sS'results'
p2077
(dp2078
S'unapply'
p2079
(dp2080
S'score'
p2081
I65
sS'raw_score'
p2082
I0
sS'display'
p2083
S'17/26'
p2084
ssS'apply'
p2085
(dp2086
g2081
I83
sg2082
I0
sg2083
S'34/41'
p2087
ssS'change'
p2088
(dp2089
g2081
I41
sg2082
I0
sg2083
S'7/17'
p2090
ssS'query'
p2091
(dp2092
g2081
I91
sg2082
I0
sg2083
S'59/65'
p2093
ssssVShiira 0
p2094
(dp2095
S'summary_display'
p2096
S'125/149'
p2097
sS'total_runs'
p2098
L1L
sS'summary_score'
p2099
I84
sS'results'
p2100
(dp2101
S'unapply'
p2102
(dp2103
S'score'
p2104
I73
sS'raw_score'
p2105
I0
sS'display'
p2106
S'19/26'
p2107
ssS'apply'
p2108
(dp2109
g2104
I95
sg2105
I0
sg2106
S'39/41'
p2110
ssS'change'
p2111
(dp2112
g2104
I100
sg2105
I0
sg2106
S'17/17'
p2113
ssS'query'
p2114
(dp2115
g2104
I77
sg2105
I0
sg2106
S'50/65'
p2116
ssssVGaleon 2.0
p2117
(dp2118
S'summary_display'
p2119
S'117/149'
p2120
sS'total_runs'
p2121
L2L
sS'summary_score'
p2122
I79
sS'results'
p2123
(dp2124
S'unapply'
p2125
(dp2126
S'score'
p2127
I65
sS'raw_score'
p2128
I0
sS'display'
p2129
S'17/26'
p2130
ssS'apply'
p2131
(dp2132
g2127
I83
sg2128
I0
sg2129
S'34/41'
p2133
ssS'change'
p2134
(dp2135
g2127
I41
sg2128
I0
sg2129
S'7/17'
p2136
ssS'query'
p2137
(dp2138
g2127
I91
sg2128
I0
sg2129
S'59/65'
p2139
ssssVQtWeb 3.1
p2140
(dp2141
S'summary_display'
p2142
S'111/149'
p2143
sS'total_runs'
p2144
L1L
sS'summary_score'
p2145
I74
sS'results'
p2146
(dp2147
S'unapply'
p2148
(dp2149
S'score'
p2150
I35
sS'raw_score'
p2151
I0
sS'display'
p2152
S'9/26'
p2153
ssS'apply'
p2154
(dp2155
g2150
I85
sg2151
I0
sg2152
S'35/41'
p2156
ssS'change'
p2157
(dp2158
g2150
I100
sg2151
I0
sg2152
S'17/17'
p2159
ssS'query'
p2160
(dp2161
g2150
I77
sg2151
I0
sg2152
S'50/65'
p2162
ssssVMicroB 0.3
p2163
(dp2164
S'summary_display'
p2165
S'118/149'
p2166
sS'total_runs'
p2167
L1L
sS'summary_score'
p2168
I79
sS'results'
p2169
(dp2170
S'unapply'
p2171
(dp2172
S'score'
p2173
I65
sS'raw_score'
p2174
I0
sS'display'
p2175
S'17/26'
p2176
ssS'apply'
p2177
(dp2178
g2173
I83
sg2174
I0
sg2175
S'34/41'
p2179
ssS'change'
p2180
(dp2181
g2173
I41
sg2174
I0
sg2175
S'7/17'
p2182
ssS'query'
p2183
(dp2184
g2173
I92
sg2174
I0
sg2175
S'60/65'
p2185
ssssVIE 11.0
p2186
(dp2187
S'summary_display'
p2188
S'101/149'
p2189
sS'total_runs'
p2190
L1L
sS'summary_score'
p2191
I68
sS'results'
p2192
(dp2193
S'unapply'
p2194
(dp2195
S'score'
p2196
I54
sS'raw_score'
p2197
I0
sS'display'
p2198
S'14/26'
p2199
ssS'apply'
p2200
(dp2201
g2196
I59
sg2197
I0
sg2198
S'24/41'
p2202
ssS'change'
p2203
(dp2204
g2196
I29
sg2197
I0
sg2198
S'5/17'
p2205
ssS'query'
p2206
(dp2207
g2196
I89
sg2197
I0
sg2198
S'58/65'
p2208
ssssViCab 4.7
p2209
(dp2210
S'summary_display'
p2211
S'125/149'
p2212
sS'total_runs'
p2213
L1L
sS'summary_score'
p2214
I84
sS'results'
p2215
(dp2216
S'unapply'
p2217
(dp2218
S'score'
p2219
I73
sS'raw_score'
p2220
I0
sS'display'
p2221
S'19/26'
p2222
ssS'apply'
p2223
(dp2224
g2219
I95
sg2220
I0
sg2221
S'39/41'
p2225
ssS'change'
p2226
(dp2227
g2219
I100
sg2220
I0
sg2221
S'17/17'
p2228
ssS'query'
p2229
(dp2230
g2219
I77
sg2220
I0
sg2221
S'50/65'
p2231
ssssVVienna 2.3
p2232
(dp2233
S'summary_display'
p2234
S'123/149'
p2235
sS'total_runs'
p2236
L3L
sS'summary_score'
p2237
I83
sS'results'
p2238
(dp2239
S'unapply'
p2240
(dp2241
S'score'
p2242
I73
sS'raw_score'
p2243
I0
sS'display'
p2244
S'19/26'
p2245
ssS'apply'
p2246
(dp2247
g2242
I90
sg2243
I0
sg2244
S'37/41'
p2248
ssS'change'
p2249
(dp2250
g2242
I100
sg2243
I0
sg2244
S'17/17'
p2251
ssS'query'
p2252
(dp2253
g2242
I77
sg2243
I0
sg2244
S'50/65'
p2254
ssssVSafari 1.0
p2255
(dp2256
S'summary_display'
p2257
S'129/149'
p2258
sS'total_runs'
p2259
L2L
sS'summary_score'
p2260
I87
sS'results'
p2261
(dp2262
S'unapply'
p2263
(dp2264
S'score'
p2265
I88
sS'raw_score'
p2266
I0
sS'display'
p2267
S'23/26'
p2268
ssS'apply'
p2269
(dp2270
g2265
I95
sg2266
I0
sg2267
S'39/41'
p2271
ssS'change'
p2272
(dp2273
g2265
I100
sg2266
I0
sg2267
S'17/17'
p2274
ssS'query'
p2275
(dp2276
g2265
I77
sg2266
I0
sg2267
S'50/65'
p2277
ssssVSafari 3.2
p2278
(dp2279
S'summary_display'
p2280
S'104/149'
p2281
sS'total_runs'
p2282
L21L
sS'summary_score'
p2283
I70
sS'results'
p2284
(dp2285
S'unapply'
p2286
(dp2287
S'score'
p2288
I35
sS'raw_score'
p2289
I0
sS'display'
p2290
S'9/26'
p2291
ssS'apply'
p2292
(dp2293
g2288
I68
sg2289
I0
sg2290
S'28/41'
p2294
ssS'change'
p2295
(dp2296
g2288
I100
sg2289
I0
sg2290
S'17/17'
p2297
ssS'query'
p2298
(dp2299
g2288
I77
sg2289
I0
sg2290
S'50/65'
p2300
ssssVSafari 3.1
p2301
(dp2302
S'summary_display'
p2303
S'125/149'
p2304
sS'total_runs'
p2305
L13L
sS'summary_score'
p2306
I84
sS'results'
p2307
(dp2308
S'unapply'
p2309
(dp2310
S'score'
p2311
I73
sS'raw_score'
p2312
I0
sS'display'
p2313
S'19/26'
p2314
ssS'apply'
p2315
(dp2316
g2311
I95
sg2312
I0
sg2313
S'39/41'
p2317
ssS'change'
p2318
(dp2319
g2311
I100
sg2312
I0
sg2313
S'17/17'
p2320
ssS'query'
p2321
(dp2322
g2311
I77
sg2312
I0
sg2313
S'50/65'
p2323
ssssVSafari 3.0
p2324
(dp2325
S'summary_display'
p2326
S'103/149'
p2327
sS'total_runs'
p2328
L4L
sS'summary_score'
p2329
I69
sS'results'
p2330
(dp2331
S'unapply'
p2332
(dp2333
S'score'
p2334
I31
sS'raw_score'
p2335
I0
sS'display'
p2336
S'8/26'
p2337
ssS'apply'
p2338
(dp2339
g2334
I68
sg2335
I0
sg2336
S'28/41'
p2340
ssS'change'
p2341
(dp2342
g2334
I100
sg2335
I0
sg2336
S'17/17'
p2343
ssS'query'
p2344
(dp2345
g2334
I77
sg2335
I0
sg2336
S'50/65'
p2346
ssssVVodafone 1.0
p2347
(dp2348
S'summary_display'
p2349
S'117/149'
p2350
sS'total_runs'
p2351
L1L
sS'summary_score'
p2352
I79
sS'results'
p2353
(dp2354
S'unapply'
p2355
(dp2356
S'score'
p2357
I65
sS'raw_score'
p2358
I0
sS'display'
p2359
S'17/26'
p2360
ssS'apply'
p2361
(dp2362
g2357
I83
sg2358
I0
sg2359
S'34/41'
p2363
ssS'change'
p2364
(dp2365
g2357
I41
sg2358
I0
sg2359
S'7/17'
p2366
ssS'query'
p2367
(dp2368
g2357
I91
sg2358
I0
sg2359
S'59/65'
p2369
ssssVWii 9.30
p2370
(dp2371
S'summary_display'
p2372
S'0/149'
p2373
sS'total_runs'
p2374
L2L
sS'summary_score'
p2375
I0
sS'results'
p2376
(dp2377
S'unapply'
p2378
(dp2379
S'score'
p2380
I0
sS'raw_score'
p2381
I0
sS'display'
p2382
S'0/26'
p2383
ssS'apply'
p2384
(dp2385
g2380
I0
sg2381
I0
sg2382
S'0/41'
p2386
ssS'change'
p2387
(dp2388
g2380
I0
sg2381
I0
sg2382
S'0/17'
p2389
ssS'query'
p2390
(dp2391
g2380
I0
sg2381
I0
sg2382
S'0/65'
p2392
ssssVArora 0.5
p2393
(dp2394
S'summary_display'
p2395
S'111/149'
p2396
sS'total_runs'
p2397
L1L
sS'summary_score'
p2398
I74
sS'results'
p2399
(dp2400
S'unapply'
p2401
(dp2402
S'score'
p2403
I35
sS'raw_score'
p2404
I0
sS'display'
p2405
S'9/26'
p2406
ssS'apply'
p2407
(dp2408
g2403
I85
sg2404
I0
sg2405
S'35/41'
p2409
ssS'change'
p2410
(dp2411
g2403
I100
sg2404
I0
sg2405
S'17/17'
p2412
ssS'query'
p2413
(dp2414
g2403
I77
sg2404
I0
sg2405
S'50/65'
p2415
ssssVArora 0.6
p2416
(dp2417
S'summary_display'
p2418
S'102/149'
p2419
sS'total_runs'
p2420
L1L
sS'summary_score'
p2421
I68
sS'results'
p2422
(dp2423
S'unapply'
p2424
(dp2425
S'score'
p2426
I31
sS'raw_score'
p2427
I0
sS'display'
p2428
S'8/26'
p2429
ssS'apply'
p2430
(dp2431
g2426
I66
sg2427
I0
sg2428
S'27/41'
p2432
ssS'change'
p2433
(dp2434
g2426
I100
sg2427
I0
sg2428
S'17/17'
p2435
ssS'query'
p2436
(dp2437
g2426
I77
sg2427
I0
sg2428
S'50/65'
p2438
ssssVArora 0.7
p2439
(dp2440
S'summary_display'
p2441
S'111/149'
p2442
sS'total_runs'
p2443
L1L
sS'summary_score'
p2444
I74
sS'results'
p2445
(dp2446
S'unapply'
p2447
(dp2448
S'score'
p2449
I35
sS'raw_score'
p2450
I0
sS'display'
p2451
S'9/26'
p2452
ssS'apply'
p2453
(dp2454
g2449
I85
sg2450
I0
sg2451
S'35/41'
p2455
ssS'change'
p2456
(dp2457
g2449
I100
sg2450
I0
sg2451
S'17/17'
p2458
ssS'query'
p2459
(dp2460
g2449
I77
sg2450
I0
sg2451
S'50/65'
p2461
ssssVArora 0.8
p2462
(dp2463
S'summary_display'
p2464
S'111/149'
p2465
sS'total_runs'
p2466
L5L
sS'summary_score'
p2467
I74
sS'results'
p2468
(dp2469
S'unapply'
p2470
(dp2471
S'score'
p2472
I35
sS'raw_score'
p2473
I0
sS'display'
p2474
S'9/26'
p2475
ssS'apply'
p2476
(dp2477
g2472
I85
sg2473
I0
sg2474
S'35/41'
p2478
ssS'change'
p2479
(dp2480
g2472
I100
sg2473
I0
sg2474
S'17/17'
p2481
ssS'query'
p2482
(dp2483
g2472
I77
sg2473
I0
sg2474
S'50/65'
p2484
ssssVArora 0.9
p2485
(dp2486
S'summary_display'
p2487
S'111/149'
p2488
sS'total_runs'
p2489
L11L
sS'summary_score'
p2490
I74
sS'results'
p2491
(dp2492
S'unapply'
p2493
(dp2494
S'score'
p2495
I35
sS'raw_score'
p2496
I0
sS'display'
p2497
S'9/26'
p2498
ssS'apply'
p2499
(dp2500
g2495
I85
sg2496
I0
sg2497
S'35/41'
p2501
ssS'change'
p2502
(dp2503
g2495
I100
sg2496
I0
sg2497
S'17/17'
p2504
ssS'query'
p2505
(dp2506
g2495
I77
sg2496
I0
sg2497
S'50/65'
p2507
ssssVOpera 9.02
p2508
(dp2509
S'summary_display'
p2510
S'0/149'
p2511
sS'total_runs'
p2512
L1L
sS'summary_score'
p2513
I0
sS'results'
p2514
(dp2515
S'unapply'
p2516
(dp2517
S'score'
p2518
I0
sS'raw_score'
p2519
I0
sS'display'
p2520
S'0/26'
p2521
ssS'apply'
p2522
(dp2523
g2518
I0
sg2519
I0
sg2520
S'0/41'
p2524
ssS'change'
p2525
(dp2526
g2518
I0
sg2519
I0
sg2520
S'0/17'
p2527
ssS'query'
p2528
(dp2529
g2518
I0
sg2519
I0
sg2520
S'0/65'
p2530
ssssVOther
p2531
(dp2532
S'summary_display'
p2533
S'123/149'
p2534
sS'total_runs'
p2535
L65L
sS'summary_score'
p2536
I83
sS'results'
p2537
(dp2538
S'unapply'
p2539
(dp2540
S'score'
p2541
I69
sS'raw_score'
p2542
I0
sS'display'
p2543
S'18/26'
p2544
ssS'apply'
p2545
(dp2546
g2541
I95
sg2542
I0
sg2543
S'39/41'
p2547
ssS'change'
p2548
(dp2549
g2541
I100
sg2542
I0
sg2543
S'17/17'
p2550
ssS'query'
p2551
(dp2552
g2541
I75
sg2542
I0
sg2543
S'49/65'
p2553
ssssVUzbl 0
p2554
(dp2555
S'summary_display'
p2556
S'129/149'
p2557
sS'total_runs'
p2558
L24L
sS'summary_score'
p2559
I87
sS'results'
p2560
(dp2561
S'unapply'
p2562
(dp2563
S'score'
p2564
I88
sS'raw_score'
p2565
I0
sS'display'
p2566
S'23/26'
p2567
ssS'apply'
p2568
(dp2569
g2564
I95
sg2565
I0
sg2566
S'39/41'
p2570
ssS'change'
p2571
(dp2572
g2564
I100
sg2565
I0
sg2566
S'17/17'
p2573
ssS'query'
p2574
(dp2575
g2564
I77
sg2565
I0
sg2566
S'50/65'
p2576
ssssVArora 0.10
p2577
(dp2578
S'summary_display'
p2579
S'111/149'
p2580
sS'total_runs'
p2581
L7L
sS'summary_score'
p2582
I74
sS'results'
p2583
(dp2584
S'unapply'
p2585
(dp2586
S'score'
p2587
I35
sS'raw_score'
p2588
I0
sS'display'
p2589
S'9/26'
p2590
ssS'apply'
p2591
(dp2592
g2587
I85
sg2588
I0
sg2589
S'35/41'
p2593
ssS'change'
p2594
(dp2595
g2587
I100
sg2588
I0
sg2589
S'17/17'
p2596
ssS'query'
p2597
(dp2598
g2587
I77
sg2588
I0
sg2589
S'50/65'
p2599
ssssVOpera 9.80
p2600
(dp2601
S'summary_display'
p2602
S'89/149'
p2603
sS'total_runs'
p2604
L2L
sS'summary_score'
p2605
I60
sS'results'
p2606
(dp2607
S'unapply'
p2608
(dp2609
S'score'
p2610
I50
sS'raw_score'
p2611
I0
sS'display'
p2612
S'13/26'
p2613
ssS'apply'
p2614
(dp2615
g2610
I63
sg2611
I0
sg2612
S'26/41'
p2616
ssS'change'
p2617
(dp2618
g2610
I41
sg2611
I0
sg2612
S'7/17'
p2619
ssS'query'
p2620
(dp2621
g2610
I66
sg2611
I0
sg2612
S'43/65'
p2622
ssssVIceweasel 2.0
p2623
(dp2624
S'summary_display'
p2625
S'117/149'
p2626
sS'total_runs'
p2627
L1L
sS'summary_score'
p2628
I79
sS'results'
p2629
(dp2630
S'unapply'
p2631
(dp2632
S'score'
p2633
I65
sS'raw_score'
p2634
I0
sS'display'
p2635
S'17/26'
p2636
ssS'apply'
p2637
(dp2638
g2633
I83
sg2634
I0
sg2635
S'34/41'
p2639
ssS'change'
p2640
(dp2641
g2633
I41
sg2634
I0
sg2635
S'7/17'
p2642
ssS'query'
p2643
(dp2644
g2633
I91
sg2634
I0
sg2635
S'59/65'
p2645
ssssVChrome Frame (IE 6) 4.0
p2646
(dp2647
S'summary_display'
p2648
S'129/149'
p2649
sS'total_runs'
p2650
L2L
sS'summary_score'
p2651
I87
sS'results'
p2652
(dp2653
S'unapply'
p2654
(dp2655
S'score'
p2656
I88
sS'raw_score'
p2657
I0
sS'display'
p2658
S'23/26'
p2659
ssS'apply'
p2660
(dp2661
g2656
I95
sg2657
I0
sg2658
S'39/41'
p2662
ssS'change'
p2663
(dp2664
g2656
I100
sg2657
I0
sg2658
S'17/17'
p2665
ssS'query'
p2666
(dp2667
g2656
I77
sg2657
I0
sg2658
S'50/65'
p2668
ssssVCamino 1.6
p2669
(dp2670
S'summary_display'
p2671
S'117/149'
p2672
sS'total_runs'
p2673
L14L
sS'summary_score'
p2674
I79
sS'results'
p2675
(dp2676
S'unapply'
p2677
(dp2678
S'score'
p2679
I65
sS'raw_score'
p2680
I0
sS'display'
p2681
S'17/26'
p2682
ssS'apply'
p2683
(dp2684
g2679
I83
sg2680
I0
sg2681
S'34/41'
p2685
ssS'change'
p2686
(dp2687
g2679
I41
sg2680
I0
sg2681
S'7/17'
p2688
ssS'query'
p2689
(dp2690
g2679
I91
sg2680
I0
sg2681
S'59/65'
p2691
ssssVCamino 1.5
p2692
(dp2693
S'summary_display'
p2694
S'117/149'
p2695
sS'total_runs'
p2696
L1L
sS'summary_score'
p2697
I79
sS'results'
p2698
(dp2699
S'unapply'
p2700
(dp2701
S'score'
p2702
I65
sS'raw_score'
p2703
I0
sS'display'
p2704
S'17/26'
p2705
ssS'apply'
p2706
(dp2707
g2702
I83
sg2703
I0
sg2704
S'34/41'
p2708
ssS'change'
p2709
(dp2710
g2702
I41
sg2703
I0
sg2704
S'7/17'
p2711
ssS'query'
p2712
(dp2713
g2702
I91
sg2703
I0
sg2704
S'59/65'
p2714
ssssS'total_runs'
p2715
L10761L
sVFirefox (Namoroka) 3.7
p2716
(dp2717
S'summary_display'
p2718
S'117/149'
p2719
sS'total_runs'
p2720
L1L
sS'summary_score'
p2721
I79
sS'results'
p2722
(dp2723
S'unapply'
p2724
(dp2725
S'score'
p2726
I65
sS'raw_score'
p2727
I0
sS'display'
p2728
S'17/26'
p2729
ssS'apply'
p2730
(dp2731
g2726
I83
sg2727
I0
sg2728
S'34/41'
p2732
ssS'change'
p2733
(dp2734
g2726
I41
sg2727
I0
sg2728
S'7/17'
p2735
ssS'query'
p2736
(dp2737
g2726
I91
sg2727
I0
sg2728
S'59/65'
p2738
ssssVMaxthon 3.0
p2739
(dp2740
S'summary_display'
p2741
S'129/149'
p2742
sS'total_runs'
p2743
L6L
sS'summary_score'
p2744
I87
sS'results'
p2745
(dp2746
S'unapply'
p2747
(dp2748
S'score'
p2749
I88
sS'raw_score'
p2750
I0
sS'display'
p2751
S'23/26'
p2752
ssS'apply'
p2753
(dp2754
g2749
I95
sg2750
I0
sg2751
S'39/41'
p2755
ssS'change'
p2756
(dp2757
g2749
I100
sg2750
I0
sg2751
S'17/17'
p2758
ssS'query'
p2759
(dp2760
g2749
I77
sg2750
I0
sg2751
S'50/65'
p2761
ssssVChrome 2.0
p2762
(dp2763
S'summary_display'
p2764
S'125/149'
p2765
sS'total_runs'
p2766
L216L
sS'summary_score'
p2767
I84
sS'results'
p2768
(dp2769
S'unapply'
p2770
(dp2771
S'score'
p2772
I73
sS'raw_score'
p2773
I0
sS'display'
p2774
S'19/26'
p2775
ssS'apply'
p2776
(dp2777
g2772
I95
sg2773
I0
sg2774
S'39/41'
p2778
ssS'change'
p2779
(dp2780
g2772
I100
sg2773
I0
sg2774
S'17/17'
p2781
ssS'query'
p2782
(dp2783
g2772
I77
sg2773
I0
sg2774
S'50/65'
p2784
ssssVNokia 5800
p2785
(dp2786
S'summary_display'
p2787
S'100/149'
p2788
sS'total_runs'
p2789
L3L
sS'summary_score'
p2790
I67
sS'results'
p2791
(dp2792
S'unapply'
p2793
(dp2794
S'score'
p2795
I31
sS'raw_score'
p2796
I0
sS'display'
p2797
S'8/26'
p2798
ssS'apply'
p2799
(dp2800
g2795
I68
sg2796
I0
sg2797
S'28/41'
p2801
ssS'change'
p2802
(dp2803
g2795
I100
sg2796
I0
sg2797
S'17/17'
p2804
ssS'query'
p2805
(dp2806
g2795
I72
sg2796
I0
sg2797
S'47/65'
p2807
ssssVMaemo Browser 1.4
p2808
(dp2809
S'summary_display'
p2810
S'118/149'
p2811
sS'total_runs'
p2812
L1L
sS'summary_score'
p2813
I79
sS'results'
p2814
(dp2815
S'unapply'
p2816
(dp2817
S'score'
p2818
I65
sS'raw_score'
p2819
I0
sS'display'
p2820
S'17/26'
p2821
ssS'apply'
p2822
(dp2823
g2818
I83
sg2819
I0
sg2820
S'34/41'
p2824
ssS'change'
p2825
(dp2826
g2818
I41
sg2819
I0
sg2820
S'7/17'
p2827
ssS'query'
p2828
(dp2829
g2818
I92
sg2819
I0
sg2820
S'60/65'
p2830
ssssVChrome 0.2
p2831
(dp2832
S'summary_display'
p2833
S'104/149'
p2834
sS'total_runs'
p2835
L3L
sS'summary_score'
p2836
I70
sS'results'
p2837
(dp2838
S'unapply'
p2839
(dp2840
S'score'
p2841
I35
sS'raw_score'
p2842
I0
sS'display'
p2843
S'9/26'
p2844
ssS'apply'
p2845
(dp2846
g2841
I68
sg2842
I0
sg2843
S'28/41'
p2847
ssS'change'
p2848
(dp2849
g2841
I100
sg2842
I0
sg2843
S'17/17'
p2850
ssS'query'
p2851
(dp2852
g2841
I77
sg2842
I0
sg2843
S'50/65'
p2853
ssssVMaxthon 2.0
p2854
(dp2855
S'summary_display'
p2856
S'99/149'
p2857
sS'total_runs'
p2858
L12L
sS'summary_score'
p2859
I66
sS'results'
p2860
(dp2861
S'unapply'
p2862
(dp2863
S'score'
p2864
I54
sS'raw_score'
p2865
I0
sS'display'
p2866
S'14/26'
p2867
ssS'apply'
p2868
(dp2869
g2864
I59
sg2865
I0
sg2866
S'24/41'
p2870
ssS'change'
p2871
(dp2872
g2864
I29
sg2865
I0
sg2866
S'5/17'
p2873
ssS'query'
p2874
(dp2875
g2864
I86
sg2865
I0
sg2866
S'56/65'
p2876
ssssVNetscape 9.0
p2877
(dp2878
S'summary_display'
p2879
S'117/149'
p2880
sS'total_runs'
p2881
L1L
sS'summary_score'
p2882
I79
sS'results'
p2883
(dp2884
S'unapply'
p2885
(dp2886
S'score'
p2887
I65
sS'raw_score'
p2888
I0
sS'display'
p2889
S'17/26'
p2890
ssS'apply'
p2891
(dp2892
g2887
I83
sg2888
I0
sg2889
S'34/41'
p2893
ssS'change'
p2894
(dp2895
g2887
I41
sg2888
I0
sg2889
S'7/17'
p2896
ssS'query'
p2897
(dp2898
g2887
I91
sg2888
I0
sg2889
S'59/65'
p2899
ssssVEpiphany 2.22
p2900
(dp2901
S'summary_display'
p2902
S'117/149'
p2903
sS'total_runs'
p2904
L7L
sS'summary_score'
p2905
I79
sS'results'
p2906
(dp2907
S'unapply'
p2908
(dp2909
S'score'
p2910
I65
sS'raw_score'
p2911
I0
sS'display'
p2912
S'17/26'
p2913
ssS'apply'
p2914
(dp2915
g2910
I83
sg2911
I0
sg2912
S'34/41'
p2916
ssS'change'
p2917
(dp2918
g2910
I41
sg2911
I0
sg2912
S'7/17'
p2919
ssS'query'
p2920
(dp2921
g2910
I91
sg2911
I0
sg2912
S'59/65'
p2922
ssssVNetFront 3.5
p2923
(dp2924
S'summary_display'
p2925
S'0/149'
p2926
sS'total_runs'
p2927
L1L
sS'summary_score'
p2928
I0
sS'results'
p2929
(dp2930
S'unapply'
p2931
(dp2932
S'score'
p2933
I0
sS'raw_score'
p2934
I0
sS'display'
p2935
S'0/26'
p2936
ssS'apply'
p2937
(dp2938
g2933
I0
sg2934
I0
sg2935
S'0/41'
p2939
ssS'change'
p2940
(dp2941
g2933
I0
sg2934
I0
sg2935
S'0/17'
p2942
ssS'query'
p2943
(dp2944
g2933
I0
sg2934
I0
sg2935
S'0/65'
p2945
ssssVNetFront 3.4
p2946
(dp2947
S'summary_display'
p2948
S'117/149'
p2949
sS'total_runs'
p2950
L1L
sS'summary_score'
p2951
I79
sS'results'
p2952
(dp2953
S'unapply'
p2954
(dp2955
S'score'
p2956
I65
sS'raw_score'
p2957
I0
sS'display'
p2958
S'17/26'
p2959
ssS'apply'
p2960
(dp2961
g2956
I83
sg2957
I0
sg2958
S'34/41'
p2962
ssS'change'
p2963
(dp2964
g2956
I41
sg2957
I0
sg2958
S'7/17'
p2965
ssS'query'
p2966
(dp2967
g2956
I91
sg2957
I0
sg2958
S'59/65'
p2968
ssssVMidori 0.2
p2969
(dp2970
S'summary_display'
p2971
S'129/149'
p2972
sS'total_runs'
p2973
L6L
sS'summary_score'
p2974
I87
sS'results'
p2975
(dp2976
S'unapply'
p2977
(dp2978
S'score'
p2979
I88
sS'raw_score'
p2980
I0
sS'display'
p2981
S'23/26'
p2982
ssS'apply'
p2983
(dp2984
g2979
I95
sg2980
I0
sg2981
S'39/41'
p2985
ssS'change'
p2986
(dp2987
g2979
I100
sg2980
I0
sg2981
S'17/17'
p2988
ssS'query'
p2989
(dp2990
g2979
I77
sg2980
I0
sg2981
S'50/65'
p2991
ssssVMidori 0.1
p2992
(dp2993
S'summary_display'
p2994
S'125/149'
p2995
sS'total_runs'
p2996
L13L
sS'summary_score'
p2997
I84
sS'results'
p2998
(dp2999
S'unapply'
p3000
(dp3001
S'score'
p3002
I73
sS'raw_score'
p3003
I0
sS'display'
p3004
S'19/26'
p3005
ssS'apply'
p3006
(dp3007
g3002
I95
sg3003
I0
sg3004
S'39/41'
p3008
ssS'change'
p3009
(dp3010
g3002
I100
sg3003
I0
sg3004
S'17/17'
p3011
ssS'query'
p3012
(dp3013
g3002
I77
sg3003
I0
sg3004
S'50/65'
p3014
ssssVOpera 10.50
p3015
(dp3016
S'summary_display'
p3017
S'88/149'
p3018
sS'total_runs'
p3019
L143L
sS'summary_score'
p3020
I59
sS'results'
p3021
(dp3022
S'unapply'
p3023
(dp3024
S'score'
p3025
I50
sS'raw_score'
p3026
I0
sS'display'
p3027
S'13/26'
p3028
ssS'apply'
p3029
(dp3030
g3025
I63
sg3026
I0
sg3027
S'26/41'
p3031
ssS'change'
p3032
(dp3033
g3025
I41
sg3026
I0
sg3027
S'7/17'
p3034
ssS'query'
p3035
(dp3036
g3025
I65
sg3026
I0
sg3027
S'42/65'
p3037
ssss. | apache-2.0 | -4,245,356,437,274,673,000 | 6.484537 | 28 | 0.754963 | false |
techdragon/django | tests/distinct_on_fields/tests.py | 34 | 5957 | from __future__ import unicode_literals
from django.db.models import Max
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import str_prefix
from .models import Celebrity, Fan, Staff, StaffTag, Tag
@skipUnlessDBFeature('can_distinct_on_fields')
@skipUnlessDBFeature('supports_nullable_unique_constraints')
class DistinctOnTests(TestCase):
def setUp(self):
t1 = Tag.objects.create(name='t1')
Tag.objects.create(name='t2', parent=t1)
t3 = Tag.objects.create(name='t3', parent=t1)
Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
self.p1_o1 = Staff.objects.create(id=1, name="p1", organisation="o1")
self.p2_o1 = Staff.objects.create(id=2, name="p2", organisation="o1")
self.p3_o1 = Staff.objects.create(id=3, name="p3", organisation="o1")
self.p1_o2 = Staff.objects.create(id=4, name="p1", organisation="o2")
self.p1_o1.coworkers.add(self.p2_o1, self.p3_o1)
StaffTag.objects.create(staff=self.p1_o1, tag=t1)
StaffTag.objects.create(staff=self.p1_o1, tag=t1)
celeb1 = Celebrity.objects.create(name="c1")
celeb2 = Celebrity.objects.create(name="c2")
self.fan1 = Fan.objects.create(fan_of=celeb1)
self.fan2 = Fan.objects.create(fan_of=celeb1)
self.fan3 = Fan.objects.create(fan_of=celeb2)
def test_basic_distinct_on(self):
"""QuerySet.distinct('field', ...) works"""
# (qset, expected) tuples
qsets = (
(
Staff.objects.distinct().order_by('name'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('name').order_by('name'),
['<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('organisation').order_by('organisation', 'name'),
['<Staff: p1>', '<Staff: p1>'],
),
(
Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).distinct('name').order_by('name'),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
# Does combining querysets work?
(
(Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).
distinct('name').order_by('name') |
Celebrity.objects.filter(fan__in=[self.fan3]).
distinct('name').order_by('name')),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
(
StaffTag.objects.distinct('staff', 'tag'),
['<StaffTag: t1 -> p1>'],
),
(
Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
['<Tag: t2>', '<Tag: t4>', '<Tag: t1>'],
),
(
StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
['<StaffTag: t1 -> p1>'],
),
# Fetch the alphabetically first coworker for each worker
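            # (str_prefix fills in the u'' repr prefix where the running
            # Python version requires it.)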
(
(Staff.objects.distinct('id').order_by('id', 'coworkers__name').
values_list('id', 'coworkers__name')),
[str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
str_prefix("(3, %(_)s'p1')"), "(4, None)"]
),
)
for qset, expected in qsets:
self.assertQuerysetEqual(qset, expected)
self.assertEqual(qset.count(), len(expected))
# Combining queries with different distinct_fields is not allowed.
base_qs = Celebrity.objects.all()
with self.assertRaisesMessage(AssertionError, "Cannot combine queries with different distinct fields."):
base_qs.distinct('id') & base_qs.distinct('name')
# Test join unreffing
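        # (i.e. the OUTER JOIN added for the distinct fields should be dropped
        # again once those fields are no longer part of the query)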
c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(c1.query))
c2 = c1.distinct('pk')
self.assertNotIn('OUTER JOIN', str(c2.query))
def test_distinct_not_implemented_checks(self):
# distinct + annotate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.annotate(Max('id')).distinct('id')[0]
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').annotate(Max('id'))[0]
# However this check is done only when the query executes, so you
# can use distinct() to remove the fields before execution.
Celebrity.objects.distinct('id').annotate(Max('id')).distinct()[0]
# distinct + aggregate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').aggregate(Max('id'))
def test_distinct_on_in_ordered_subquery(self):
qs = Staff.objects.distinct('name').order_by('name', 'id')
qs = Staff.objects.filter(pk__in=qs).order_by('name')
self.assertQuerysetEqual(
qs, [self.p1_o1, self.p2_o1, self.p3_o1],
lambda x: x
)
qs = Staff.objects.distinct('name').order_by('name', '-id')
qs = Staff.objects.filter(pk__in=qs).order_by('name')
self.assertQuerysetEqual(
qs, [self.p1_o2, self.p2_o1, self.p3_o1],
lambda x: x
)
def test_distinct_on_get_ordering_preserved(self):
"""
Ordering shouldn't be cleared when distinct on fields are specified.
refs #25081
"""
staff = Staff.objects.distinct('name').order_by('name', '-organisation').get(name='p1')
self.assertEqual(staff.organisation, 'o2')
| bsd-3-clause | 3,620,798,036,416,413,000 | 42.801471 | 118 | 0.55716 | false |
Ladeia/pingo-py | pingo/parts/spi/mcp3008.py | 7 | 1982 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Object interface for MCP3008 A/D converter using bit-banged SPI
"""
import time
import atexit
import RPi.GPIO as GPIO
# make sure GPIO.cleanup will be called when script exits
atexit.register(GPIO.cleanup)
class Mcp3008(object):
def __init__(self, spi_clock, spi_miso, spi_mosi, spi_cs):
self.clock = spi_clock
self.miso = spi_miso
self.mosi = spi_mosi
self.cs = spi_cs
GPIO.setmode(GPIO.BCM)
for port in [self.clock, self.mosi, self.cs]:
GPIO.setup(port, GPIO.OUT)
GPIO.setup(self.miso, GPIO.IN)
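    # One read is a single bit-banged SPI transaction: pull CS low, clock out
    # a 5-bit command (start bit, single-ended flag, 3-bit channel number)
    # MSB first, then clock in a null bit followed by the 10-bit result.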
def read(self, channel):
assert 0 <= channel <= 7, 'channel must be 0...7'
GPIO.output(self.cs, True)
GPIO.output(self.clock, False)
GPIO.output(self.cs, False)
cmd = channel
cmd |= 0x18 # start bit + "single-ended" config bit
        cmd <<= 3 # left-align the 5 command bits in the byte; only those 5 are clocked out
for i in range(5):
GPIO.output(self.mosi, cmd & 0x80)
cmd <<= 1
GPIO.output(self.clock, True)
GPIO.output(self.clock, False)
res = 0
# read null bit plus 10 bits for ADC value
for i in range(11):
GPIO.output(self.clock, True)
GPIO.output(self.clock, False)
res <<= 1
if (GPIO.input(self.miso)):
res |= 0x1
GPIO.output(self.cs, True)
return res
def test():
# select pins for SPI
SPI_CLK = 18
SPI_MISO = 23
SPI_MOSI = 24
SPI_CS = 25
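    # BCM pin numbers (GPIO.setmode(GPIO.BCM) above) -- adjust to your wiring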
ad_chip = Mcp3008(SPI_CLK, SPI_MISO, SPI_MOSI, SPI_CS)
count = 0
display = '{0:6d} {1:010b} {1:4} {2:3.2f} V {3}'
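    # columns: sample count, raw 10-bit value (binary, then decimal), the
    # voltage assuming a 3.3 V reference, and a crude bar graph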
while True:
res = ad_chip.read(1)
volts = float(res) / 1023 * 3.3
ticks = int(round(float(res) / 1023 * 40)) * '='
print display.format(count, res, volts, ticks)
time.sleep(.2)
count += 1
if __name__ == '__main__':
test()
| mit | -989,090,346,551,082,900 | 26.527778 | 63 | 0.545913 | false |
windskyer/nova | nova/cells/weights/ram_by_instance_type.py | 63 | 1971 | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by memory needed in a way that spreads instances.
"""
from oslo_config import cfg
from nova.cells import weights
ram_weigher_opts = [
cfg.FloatOpt('ram_weight_multiplier',
default=10.0,
help='Multiplier used for weighing ram. Negative '
'numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(ram_weigher_opts, group='cells')
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
"""Weigh cells by instance_type requested."""
def weight_multiplier(self):
return CONF.cells.ram_weight_multiplier
def _weigh_object(self, cell, weight_properties):
"""Use the 'ram_free' for a particular instance_type advertised from a
child cell's capacity to compute a weight. We want to direct the
build to a cell with a higher capacity. Since higher weights win,
we just return the number of units available for the instance_type.
"""
request_spec = weight_properties['request_spec']
instance_type = request_spec['instance_type']
memory_needed = instance_type['memory_mb']
ram_free = cell.capacities.get('ram_free', {})
units_by_mb = ram_free.get('units_by_mb', {})
return units_by_mb.get(str(memory_needed), 0)
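# Illustrative worked example (not part of the original module; the capacity
# figures are made-up assumptions): if a child cell advertises
#     capacities = {'ram_free': {'units_by_mb': {'512': 100, '2048': 40}}}
# then for a flavor with memory_mb == 2048 the weigher above returns 40, and
# with the default positive ram_weight_multiplier a cell that still fits 40
# such instances outweighs one that only fits 5.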
| gpl-2.0 | 4,823,621,280,956,539,000 | 36.188679 | 78 | 0.672755 | false |
alexbrasetvik/Piped | doc/tutorials/twitter/2_oauth/twitter_tutorial/test_processors.py | 2 | 3364 | import os
import subprocess
from zope import interface
from twisted.internet import defer, utils
from twisted.web import client
from piped.plugins.status_testing import statustest, processors
from piped import processing
class TestTwitterProcessor(processors.StatusTestProcessor):
interface.classProvides(processing.IProcessor)
name = 'test-twitter'
class TestTwitter(statustest.StatusTestCase):
timeout = 4
def setUp(self, auth):
# if the auth is overridden in our test configuration, pass it on to the process.
self.auth = auth
self.auth_override = ['-O', 'twitter.my_account.auth: {username: %(username)s, password: %(password)s}' % self.auth]
self.oauth_without_access_override = ['-O', 'twitter.my_account.auth: {consumer_key: %(consumer_key)s, consumer_secret: %(consumer_secret)s}' % self.auth]
self.oauth_override = ['-O', 'twitter.my_account.auth: {consumer_key: %(consumer_key)s, consumer_secret: %(consumer_secret)s }' % self.auth]
self.oauth_override += ['-O', 'twitter.my_account.auth.access_token: {key: %(key)s, secret: %(secret)s}' % self.auth['access_token']]
@defer.inlineCallbacks
def statustest_basic_auth(self):
output = yield utils.getProcessOutput('piped', args=['-nc', 'twitter.yaml', '-p', 'basic.pid'] + self.auth_override, env=os.environ)
self.assertIn('Current rate limit status:', output)
@defer.inlineCallbacks
def statustest_oauth(self):
output = yield utils.getProcessOutput('piped', args=['-nc', 'twitter.yaml', '-p', 'oauth.pid'] + self.oauth_override, env=os.environ)
self.assertIn('Current rate limit status:', output)
@defer.inlineCallbacks
def statustest_oauth_dance(self):
sub = subprocess.Popen(args=['piped', '-nc', 'twitter.yaml', '-p', 'oauth_dance.pid']+self.oauth_without_access_override, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
while True:
line = sub.stdout.readline()
if 'Go the the following URL to authorize' in line:
oauth_token = line.strip(' ,\n').rsplit('=', 1)[-1]
if 'paste the PID into this window and press enter.' in line:
url = 'https://api.twitter.com/oauth/authorize'
postdata = 'oauth_token=%s&session[username_or_email]=%s&session[password]=%s&allow=Authorize+app' % (oauth_token, self.auth['username'], self.auth['password'])
page = yield client.getPage(url, method='POST', postdata=postdata)
for line in page.split('\n'):
if '<code>' in line:
pin = line.split('<code>')[-1].split('</code>')[0]
sub.stdin.write('%s\n' % pin)
if 'Unauthorized' in line:
self.fail('Could not authorize.')
if 'Current rate limit status' in line:
break
sub.terminate()
def configure(self, runtime_environment):
self.auth = runtime_environment.get_configuration_value('secrets.twitter.auth', None)
def get_namespace(self, baton):
return dict(auth=self.auth) | mit | 7,468,604,758,833,249,000 | 43.866667 | 206 | 0.600476 | false |
MinnowBoard/max-opencv-demos | screens/mazegame/gameplay.py | 2 | 7574 | from .include import *
import random
# The maze generation is an implementation of the unmodified randomized Prim's
# algorithm from http://en.wikipedia.org/wiki/Maze_generation_algorithm
class Maze:
def __init__ (self, w, h, seed = None):
self.w, self.h = w, h
self.area = w * h
self.scale_w = float (util.input.cfg_h) / (self.w + 2.0)
self.scale_h = float (util.input.cfg_h) / (self.h + 2.0)
self.wall_list = []
self.visited = set ()
self.passages = set ()
random.seed (seed)
self.randstate = random.getstate ()
self.generate ()
def wall_num_between_cells (self, cell1_num, cell2_num):
if cell2_num < cell1_num:
# cell 2 is either East or North of cell 1
return self.wall_num_between_cells (cell2_num, cell1_num)
ret = 2 * cell1_num
if cell2_num == cell1_num + 1 and cell2_num % self.w != 0:
return ret # cell 2 is cell 1's neighbor to the West
elif cell2_num == cell1_num + self.w:
return ret + 1 # cell 2 is cell 1's neighbor to the South
assert False
def add_wall (self, wall_num):
if wall_num not in self.wall_list:
assert wall_num not in self.passages
self.wall_list += [wall_num]
def add_wall_between_cells (self, cell1_num, cell2_num, from_wall):
wall_num = self.wall_num_between_cells (cell1_num, cell2_num)
if wall_num != from_wall:
self.add_wall (wall_num)
def visit_cell (self, cell_num, from_wall):
self.visited.add (cell_num)
if cell_num % self.w:
self.add_wall_between_cells (cell_num, cell_num - 1, from_wall)
if (cell_num + 1) % self.w:
self.add_wall_between_cells (cell_num, cell_num + 1, from_wall)
if cell_num >= self.w:
self.add_wall_between_cells (cell_num, cell_num - self.w, from_wall)
if cell_num + self.w < self.area:
self.add_wall_between_cells (cell_num, cell_num + self.w, from_wall)
def handle_wall (self, wall_list_num):
wall_num = self.wall_list [wall_list_num]
cell1_num = wall_num / 2
if wall_num % 2:
cell2_num = cell1_num + self.w
else:
cell2_num = cell1_num + 1
cell1_visited = cell1_num in self.visited
cell2_visited = cell2_num in self.visited
if cell2_visited:
cell1_visited, cell2_visited = cell2_visited, cell1_visited
cell1_num, cell2_num = cell2_num, cell1_num
assert cell1_visited # neither visited
if cell2_visited:
            # both cells already visited: keep the wall and just drop it from the candidate list
last_wall_num = self.wall_list.pop ()
if wall_list_num != len (self.wall_list):
self.wall_list[wall_list_num] = last_wall_num
else:
# one visited, time to visit the other one
self.passages.add (wall_num)
self.visit_cell (cell2_num, wall_num)
def repeatable_randrange (self, stop):
random.setstate (self.randstate)
ret = random.randrange (stop)
self.randstate = random.getstate ()
return ret
def generate (self):
self.visit_cell (self.repeatable_randrange (self.area), -1)
while len (self.wall_list):
self.handle_wall (self.repeatable_randrange (len (self.wall_list)))
self.generate_lines ()
def vertline (self, x, y1, y2):
x, y1, y2 = (x + 1) * self.scale_w, (y1 + 1) * self.scale_h, (y2 + 1) * self.scale_h
return numpy.array (((x, y1), (x, y2)))
def horizline (self, y, x1, x2):
y, x1, x2 = (y + 1) * self.scale_h, (x1 + 1) * self.scale_w, (x2 + 1) * self.scale_w
return numpy.array (((x1, y), (x2, y)))
def line_between_cells (self, cell1_num, cell2_num):
if cell2_num < cell1_num:
return self.line_between_cells (cell2_num, cell1_num)
row = cell1_num // self.w
col = cell1_num % self.w
if cell1_num + 1 == cell2_num:
return self.horizline (row + 0.5, col + 0.5, col + 1.5)
assert cell1_num + self.w == cell2_num
return self.vertline (col + 0.5, row + 0.5, row + 1.5)
def generate_lines (self):
def lines_for_cell (cell_num):
row = cell_num // self.w
col = cell_num % self.w
if row < self.h - 1 and 2 * cell_num + 1 not in self.passages:
yield self.horizline (row + 1, col, col + 1)
if 2 * cell_num not in self.passages:
yield self.vertline (col + 1, row, row + 1)
self.maze_lines = [
self.horizline (0, 1, self.w), # upper border
self.vertline (0, 0, self.h), # left border
self.horizline (self.h, 0, self.w - 1), # lower border
# Draw a small square indicating the goal point
self.horizline (self.h - 0.75, self.w - 0.75, self.w - 0.25),
self.horizline (self.h - 0.25, self.w - 0.75, self.w - 0.25),
self.vertline (self.w - 0.75, self.h - 0.75, self.h - 0.25),
self.vertline (self.w - 0.25, self.h - 0.75, self.h - 0.25),
]
for cell_num in xrange (self.area):
for line in lines_for_cell (cell_num):
self.maze_lines += [line]
# Check whether the maze is solved by the directions given
def trace (self, arrows):
self.trace_lines = [
# Draw a small square indicating the start point
self.horizline (0.25, 0.25, 0.75),
self.horizline (0.75, 0.25, 0.75),
self.vertline (0.25, 0.25, 0.75),
self.vertline (0.75, 0.25, 0.75)
]
lastcell = 0
for arrow in arrows:
if arrow.dir is arrow_left and lastcell % self.w:
nextcell = lastcell - 1
elif arrow.dir is arrow_right and (lastcell + 1) % self.w:
nextcell = lastcell + 1
elif arrow.dir is arrow_up and lastcell >= self.w:
nextcell = lastcell - self.w
elif arrow.dir is arrow_down and lastcell + self.w < self.area:
nextcell = lastcell + self.w
else:
continue
if self.wall_num_between_cells (lastcell, nextcell) in self.passages:
self.trace_lines += [self.line_between_cells (lastcell, nextcell)]
lastcell = nextcell
self.solved = lastcell + 1 == self.area
def visualize (self, ui):
visualization = util.ui.SizedCanvas (ui.game_display)
for line in self.maze_lines:
visualization.scaledLine (line[0], line[1], (0, 255, 0), 2, cv2.CV_AA)
for line in self.trace_lines:
visualization.scaledLine (line[0], line[1], (255, 0, 0), 2, cv2.CV_AA)
if self.solved:
visualization.scaledPutText ("MAZE SOLVED (\"New\" button to generate another)", numpy.array ((self.scale_w, self.scale_h * 0.5)), 0, util.input.scale_len * visualization.scale, (255, 255, 0))
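# Minimal standalone sketch (not part of the original game) of the randomized
# Prim's algorithm referenced at the top of this module. It reuses the same
# cell/wall numbering as Maze but makes its own random choices, so it is not
# guaranteed to reproduce the exact mazes the class above generates; it only
# needs the standard-library random module.
def prims_passages (w, h, seed = None):
    """Return the set of wall numbers that become passages in a w x h grid."""
    rng = random.Random (seed)
    area = w * h
    visited = set ()
    passages = set ()
    walls = []
    def neighbours (cell):
        # yield (neighbouring cell, number of the wall between them)
        if cell % w:
            yield cell - 1, 2 * (cell - 1)
        if (cell + 1) % w:
            yield cell + 1, 2 * cell
        if cell >= w:
            yield cell - w, 2 * (cell - w) + 1
        if cell + w < area:
            yield cell + w, 2 * cell + 1
    def visit (cell):
        visited.add (cell)
        for other, wall in neighbours (cell):
            if other not in visited:
                walls.append ((wall, other))
    visit (rng.randrange (area))
    while walls:
        wall, other = walls.pop (rng.randrange (len (walls)))
        if other not in visited:
            passages.add (wall)
            visit (other)
    return passages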
| mit | 4,967,479,147,143,890,000 | 35.239234 | 204 | 0.522445 | false |
LS1qJ/IP-Updater-For-Heroku | mail/simple_mail.py | 1 | 1480 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import smtplib
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email.Header import Header
from dozens_ip_updater.config import Config
class SimpleMail():
fromAddress = Config.FROM_ADDRESS
smtpServer = Config.SMTP
mailAccount = Config.MAIL_ACCOUNT
mailPass = Config.MAIL_PWD
@classmethod
def post(cls, toAddress, subject, body):
fromAddress = cls.fromAddress
msg = cls.create_message(fromAddress, toAddress, subject, body)
        cls.send_message(fromAddress, toAddress, msg)
@classmethod
def create_message(cls, fromAddress, toAddress, subject, body):
enc = 'utf-8'
msg = MIMEText(body, 'plain', enc)
msg['From'] = fromAddress
msg['To'] = toAddress
msg['Subject'] = Header(subject, enc)
msg['Date'] = formatdate()
return msg
@classmethod
    def send_message(cls, fromAddress, toAddress, msg):
try:
s = None
s = smtplib.SMTP(cls.smtpServer, 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login(cls.mailAccount, cls.mailPass)
s.sendmail(fromAddress, [toAddress], msg.as_string())
except smtplib.SMTPException:
raise
except smtplib.socket.error:
raise
except Exception:
raise
finally:
if s is not None:
s.close()
| mit | 7,714,643,754,657,493,000 | 27.461538 | 71 | 0.597973 | false |
mezz64/home-assistant | homeassistant/components/bitcoin/sensor.py | 16 | 6294 | """Bitcoin information service that uses blockchain.com."""
from datetime import timedelta
import logging
from blockchain import exchangerates, statistics
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_CURRENCY,
CONF_DISPLAY_OPTIONS,
TIME_MINUTES,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
DEFAULT_CURRENCY = "USD"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
OPTION_TYPES = {
"exchangerate": ["Exchange rate (1 BTC)", None],
"trade_volume_btc": ["Trade volume", "BTC"],
"miners_revenue_usd": ["Miners revenue", "USD"],
"btc_mined": ["Mined", "BTC"],
"trade_volume_usd": ["Trade volume", "USD"],
"difficulty": ["Difficulty", None],
"minutes_between_blocks": ["Time between Blocks", TIME_MINUTES],
"number_of_transactions": ["No. of Transactions", None],
"hash_rate": ["Hash rate", f"PH/{TIME_SECONDS}"],
"timestamp": ["Timestamp", None],
"mined_blocks": ["Mined Blocks", None],
"blocks_size": ["Block size", None],
"total_fees_btc": ["Total fees", "BTC"],
"total_btc_sent": ["Total sent", "BTC"],
"estimated_btc_sent": ["Estimated sent", "BTC"],
"total_btc": ["Total", "BTC"],
"total_blocks": ["Total Blocks", None],
"next_retarget": ["Next retarget", None],
"estimated_transaction_volume_usd": ["Est. Transaction volume", "USD"],
"miners_revenue_btc": ["Miners revenue", "BTC"],
"market_price_usd": ["Market price", "USD"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DISPLAY_OPTIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(OPTION_TYPES)]
),
vol.Optional(CONF_CURRENCY, default=DEFAULT_CURRENCY): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Bitcoin sensors."""
currency = config[CONF_CURRENCY]
if currency not in exchangerates.get_ticker():
_LOGGER.warning("Currency %s is not available. Using USD", currency)
currency = DEFAULT_CURRENCY
data = BitcoinData()
dev = []
for variable in config[CONF_DISPLAY_OPTIONS]:
dev.append(BitcoinSensor(data, variable, currency))
add_entities(dev, True)
class BitcoinSensor(Entity):
"""Representation of a Bitcoin sensor."""
def __init__(self, data, option_type, currency):
"""Initialize the sensor."""
self.data = data
self._name = OPTION_TYPES[option_type][0]
self._unit_of_measurement = OPTION_TYPES[option_type][1]
self._currency = currency
self.type = option_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
stats = self.data.stats
ticker = self.data.ticker
if self.type == "exchangerate":
self._state = ticker[self._currency].p15min
self._unit_of_measurement = self._currency
elif self.type == "trade_volume_btc":
self._state = f"{stats.trade_volume_btc:.1f}"
elif self.type == "miners_revenue_usd":
self._state = f"{stats.miners_revenue_usd:.0f}"
elif self.type == "btc_mined":
self._state = str(stats.btc_mined * 0.00000001)
elif self.type == "trade_volume_usd":
self._state = f"{stats.trade_volume_usd:.1f}"
elif self.type == "difficulty":
self._state = f"{stats.difficulty:.0f}"
elif self.type == "minutes_between_blocks":
self._state = f"{stats.minutes_between_blocks:.2f}"
elif self.type == "number_of_transactions":
self._state = str(stats.number_of_transactions)
elif self.type == "hash_rate":
self._state = f"{stats.hash_rate * 0.000001:.1f}"
elif self.type == "timestamp":
self._state = stats.timestamp
elif self.type == "mined_blocks":
self._state = str(stats.mined_blocks)
elif self.type == "blocks_size":
self._state = f"{stats.blocks_size:.1f}"
elif self.type == "total_fees_btc":
self._state = f"{stats.total_fees_btc * 0.00000001:.2f}"
elif self.type == "total_btc_sent":
self._state = f"{stats.total_btc_sent * 0.00000001:.2f}"
elif self.type == "estimated_btc_sent":
self._state = f"{stats.estimated_btc_sent * 0.00000001:.2f}"
elif self.type == "total_btc":
self._state = f"{stats.total_btc * 0.00000001:.2f}"
elif self.type == "total_blocks":
self._state = f"{stats.total_blocks:.0f}"
elif self.type == "next_retarget":
self._state = f"{stats.next_retarget:.2f}"
elif self.type == "estimated_transaction_volume_usd":
self._state = f"{stats.estimated_transaction_volume_usd:.2f}"
elif self.type == "miners_revenue_btc":
self._state = f"{stats.miners_revenue_btc * 0.00000001:.1f}"
elif self.type == "market_price_usd":
self._state = f"{stats.market_price_usd:.2f}"
class BitcoinData:
"""Get the latest data and update the states."""
def __init__(self):
"""Initialize the data object."""
self.stats = None
self.ticker = None
def update(self):
"""Get the latest data from blockchain.com."""
self.stats = statistics.get()
self.ticker = exchangerates.get_ticker()
| apache-2.0 | -7,446,982,100,493,062,000 | 33.966667 | 76 | 0.606927 | false |
jkklapp/paintshop | tester_test.py | 1 | 4056 | # Author: [email protected], 2015
import unittest
from tester import Tester
class TestTester(unittest.TestCase):
def setUp(self):
self.tester = Tester()
self.customers1 = [['1 0', '2 0'], ['1 1'], ['3 0']]
self.customers2 = [['1 0', '2 1'], ['1 1'], ['3 1']]
self.customers3 = [['2 0', '1 0'], ['2 1', '1 0']]
self.customers4 = [['2 1', '1 0'], ['2 1']]
self.customers5 = [['2 0', '1 0'], ['2 0', '1 1']]
self.customers6 = [['2 0']]
self.customers7 = [['1 0']]
self.customers8 = [['3 0', '1 1', '2 0']]
self.customers9 = [['1 0', '2 0'], ['1 1', '2 0'], ['1 0']]
self.customers10 = [['2 1', '1 0'], ['2 1', '1 0']]
self.customers11 = [['2 0', '1 0'], ['1 0']]
self.customers12 = [['3 1', '2 0'], ['2 1'], ['2 1', '1 0']]
self.customers13 = [['2 1', '1 0'], ['2 0', '1 1']]
self.customers14 = [['3 0'], ['2 1', '1 0', '3 0']]
self.customers15 = [['1 0', '2 1'], ['1 1', '2 0'], ['3 0']]
self.solution1 = ['0', '0', '0']
self.solution2 = ['1', '0', '0']
self.solution3 = ['0', '1', '0']
self.solution4 = ['1', '1', '1']
self.solution5 = ['0', '0']
self.solution6 = ['0']
self.solution7 = ['0', '1']
self.solution8 = ['1', '0', '0']
self.solution9 = ['0', '1', '1']
self.solution10 = ['0', '0', '1']
self.solution11 = ['1', '1', '1']
self.solution12 = ['1', '1', '0']
def test_solution_tester(self):
is_valid = self.tester.is_valid_solution
c1 = self.customers1
c2 = self.customers2
c3 = self.customers3
c4 = self.customers4
c5 = self.customers5
c6 = self.customers6
c7 = self.customers7
c8 = self.customers8
c9 = self.customers9
c10 = self.customers10
c11 = self.customers11
c12 = self.customers12
c13 = self.customers13
c14 = self.customers14
c15 = self.customers15
s1 = self.solution1
s2 = self.solution2
s3 = self.solution3
s4 = self.solution4
s5 = self.solution5
s6 = self.solution6
s7 = self.solution7
s8 = self.solution8
s9 = self.solution9
s10 = self.solution10
s11 = self.solution11
s12 = self.solution12
self.assertFalse(is_valid(s1, c1))
self.assertTrue(is_valid(s2, c1))
self.assertFalse(is_valid(s3, c1))
self.assertFalse(is_valid(s3, c2))
self.assertTrue(is_valid(s4, c2))
self.assertFalse(is_valid(s4, c1))
self.assertTrue(is_valid(s5, c3))
self.assertFalse(is_valid(s6, c3))
self.assertFalse(is_valid(s5, c4))
self.assertTrue(is_valid(s7, c4))
self.assertTrue(is_valid(s5, c5))
self.assertTrue(is_valid(s1, c6))
self.assertTrue(is_valid(s5, c6))
self.assertTrue(is_valid(s1, c7))
self.assertTrue(is_valid(s3, c7))
self.assertTrue(is_valid(s5, c7))
self.assertTrue(is_valid(s6, c7))
self.assertTrue(is_valid(s7, c7))
self.assertTrue(is_valid(s8, c8))
self.assertFalse(is_valid(s1, c9))
self.assertTrue(is_valid(s3, c10))
self.assertTrue(is_valid(s9, c10))
self.assertTrue(is_valid(s1, c11))
self.assertTrue(is_valid(s5, c11))
self.assertTrue(is_valid(s10, c11))
self.assertFalse(is_valid(s11, c12))
self.assertTrue(is_valid(s9, c12))
self.assertTrue(is_valid(s12, c13))
self.assertTrue(is_valid(s1, c13))
self.assertTrue(is_valid(s1, c14))
self.assertTrue(is_valid(s3, c14))
self.assertTrue(is_valid(s12, c14))
self.assertTrue(is_valid(s12, c15))
self.assertTrue(is_valid(s1, c15))
self.assertFalse(is_valid(s2, c15))
self.assertFalse(is_valid(s3, c15))
def test_solution_tester_bigger(self):
is_valid = self.tester.is_valid_solution
c = [['1 0', '6 1', '3 0', '4 0'],
['5 1', '2 0', '3 0', '6 0'], ['1 0', '5 1', '3 0', '4 0', '6 0'],
['2 1'], ['2 1', '1 0', '6 0', '3 0']]
sols = [['0', '1', '0', '0', '1', '0'],
['0', '1', '0', '1', '1', '0'],
['0', '1', '1', '0', '1', '0'],
['0', '1', '0', '0', '1', '1'],
['0', '1', '0', '0', '0', '0'],
['0', '1', '0', '1', '1', '1'],
['0', '1', '1', '0', '1', '1'],
['1', '1', '0', '0', '1', '1'],
['0', '1', '0', '0', '0', '1'],
['1', '1', '0', '0', '1', '0']]
for s in sols:
self.assertTrue(is_valid(s, c))
if __name__ == '__main__':
unittest.main() | apache-2.0 | 5,770,888,673,481,632,000 | 31.717742 | 69 | 0.56213 | false |
saneyuki/servo | tests/wpt/update/upstream.py | 10 | 13838 | from __future__ import print_function
import os
import re
import subprocess
import sys
import six.moves.urllib as urllib
from six.moves import input
from six import iteritems
from wptrunner.update.sync import UpdateCheckout
from wptrunner.update.tree import get_unique_name
from wptrunner.update.base import Step, StepRunner, exit_clean, exit_unclean
from .tree import Commit, GitTree, Patch
from .github import GitHub
def rewrite_patch(patch, strip_dir):
"""Take a Patch and rewrite the message to remove the bug number and reviewer, but add
a bugzilla link in the summary.
:param patch: the Patch to convert
"""
return Patch(patch.author, patch.email, rewrite_message(patch), None, patch.diff)
def rewrite_message(patch):
if patch.merge_message and patch.merge_message.bug:
bug = patch.merge_message.bug
else:
bug = patch.message.bug
if bug is not None:
return "\n".join([patch.message.summary,
patch.message.body,
"",
"Upstreamed from https://github.com/servo/servo/pull/%s [ci skip]" %
bug])
return "\n".join([patch.message.full_summary, "%s\n[ci skip]\n" % patch.message.body])
class SyncToUpstream(Step):
"""Sync local changes to upstream"""
def create(self, state):
if not state.kwargs["upstream"]:
return
if not isinstance(state.local_tree, GitTree):
self.logger.error("Cannot sync with upstream from a non-Git checkout.")
return exit_clean
try:
import requests
except ImportError:
self.logger.error("Upstream sync requires the requests module to be installed")
return exit_clean
if not state.sync_tree:
os.makedirs(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "tests_path", "metadata_path",
"sync"]):
state.token = kwargs["token"]
runner = SyncToUpstreamRunner(self.logger, state)
runner.run()
class GetLastSyncData(Step):
"""Find the gecko commit at which we last performed a sync with upstream and the upstream
commit that was synced."""
provides = ["sync_data_path", "last_sync_commit", "old_upstream_rev"]
def create(self, state):
self.logger.info("Looking for last sync commit")
state.sync_data_path = os.path.join(state.metadata_path, "mozilla-sync")
items = {}
with open(state.sync_data_path) as f:
for line in f.readlines():
key, value = [item.strip() for item in line.split(":", 1)]
items[key] = value
state.last_sync_commit = Commit(state.local_tree, items["local"])
state.old_upstream_rev = items["upstream"]
if not state.local_tree.contains_commit(state.last_sync_commit):
self.logger.error("Could not find last sync commit %s" % last_sync_sha1)
return exit_clean
self.logger.info("Last sync to web-platform-tests happened in %s" % state.last_sync_commit.sha1)
class CheckoutBranch(Step):
"""Create a branch in the sync tree pointing at the last upstream sync commit
and check it out"""
provides = ["branch"]
def create(self, state):
self.logger.info("Updating sync tree from %s" % state.sync["remote_url"])
state.branch = state.sync_tree.unique_branch_name(
"outbound_update_%s" % state.old_upstream_rev)
state.sync_tree.update(state.sync["remote_url"],
state.sync["branch"],
state.branch)
state.sync_tree.checkout(state.old_upstream_rev, state.branch, force=True)
class GetBaseCommit(Step):
"""Find the latest upstream commit on the branch that we are syncing with"""
provides = ["base_commit"]
def create(self, state):
state.base_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
state.sync["branch"])
self.logger.debug("New base commit is %s" % state.base_commit.sha1)
class LoadCommits(Step):
"""Get a list of commits in the gecko tree that need to be upstreamed"""
provides = ["source_commits"]
def create(self, state):
state.source_commits = state.local_tree.log(state.last_sync_commit,
state.tests_path)
update_regexp = re.compile("Update web-platform-tests to revision [0-9a-f]{40}")
for i, commit in enumerate(state.source_commits[:]):
if update_regexp.match(commit.message.text):
# This is a previous update commit so ignore it
state.source_commits.remove(commit)
continue
if commit.message.backouts:
#TODO: Add support for collapsing backouts
raise NotImplementedError("Need to get the Git->Hg commits for backouts and remove the backed out patch")
if not commit.message.bug and not (commit.merge and commit.merge.message.bug):
self.logger.error("Commit %i (%s) doesn't have an associated bug number." %
(i + 1, commit.sha1))
return exit_unclean
self.logger.debug("Source commits: %s" % state.source_commits)
class SelectCommits(Step):
"""Provide a UI to select which commits to upstream"""
def create(self, state):
if not state.source_commits:
return
while True:
commits = state.source_commits[:]
for i, commit in enumerate(commits):
print("%i:\t%s" % (i, commit.message.summary))
remove = input("Provide a space-separated list of any commits numbers to remove from the list to upstream:\n").strip()
remove_idx = set()
invalid = False
for item in remove.split(" "):
if not item:
continue
try:
item = int(item)
except:
invalid = True
break
if item < 0 or item >= len(commits):
invalid = True
break
remove_idx.add(item)
if invalid:
continue
keep_commits = [(i,cmt) for i,cmt in enumerate(commits) if i not in remove_idx]
            #TODO: consider printing removed commits
print("Selected the following commits to keep:")
for i, commit in keep_commits:
print("%i:\t%s" % (i, commit.message.summary))
confirm = input("Keep the above commits? y/n\n").strip().lower()
if confirm == "y":
state.source_commits = [item[1] for item in keep_commits]
break
class MovePatches(Step):
"""Convert gecko commits into patches against upstream and commit these to the sync tree."""
provides = ["commits_loaded"]
def create(self, state):
state.commits_loaded = 0
strip_path = os.path.relpath(state.tests_path,
state.local_tree.root)
self.logger.debug("Stripping patch %s" % strip_path)
for commit in state.source_commits[state.commits_loaded:]:
i = state.commits_loaded + 1
self.logger.info("Moving commit %i: %s" % (i, commit.message.full_summary))
patch = commit.export_patch(state.tests_path)
stripped_patch = rewrite_patch(patch, strip_path)
strip_count = strip_path.count('/')
if strip_path[-1] != '/':
strip_count += 1
try:
state.sync_tree.import_patch(stripped_patch, 1 + strip_count)
except:
print(patch.diff)
raise
state.commits_loaded = i
class RebaseCommits(Step):
"""Rebase commits from the current branch on top of the upstream destination branch.
This step is particularly likely to fail if the rebase generates merge conflicts.
In that case the conflicts can be fixed up locally and the sync process restarted
with --continue.
"""
provides = ["rebased_commits"]
def create(self, state):
self.logger.info("Rebasing local commits")
continue_rebase = False
# Check if there's a rebase in progress
if (os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-merge")) or
os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-apply"))):
continue_rebase = True
try:
state.sync_tree.rebase(state.base_commit, continue_rebase=continue_rebase)
except subprocess.CalledProcessError:
self.logger.info("Rebase failed, fix merge and run %s again with --continue" % sys.argv[0])
raise
state.rebased_commits = state.sync_tree.log(state.base_commit)
self.logger.info("Rebase successful")
class CheckRebase(Step):
"""Check if there are any commits remaining after rebase"""
def create(self, state):
if not state.rebased_commits:
self.logger.info("Nothing to upstream, exiting")
return exit_clean
class MergeUpstream(Step):
"""Run steps to push local commits as seperate PRs and merge upstream."""
provides = ["merge_index", "gh_repo"]
def create(self, state):
gh = GitHub(state.token)
if "merge_index" not in state:
state.merge_index = 0
org, name = urllib.parse.urlsplit(state.sync["remote_url"]).path[1:].split("/")
if name.endswith(".git"):
name = name[:-4]
state.gh_repo = gh.repo(org, name)
for commit in state.rebased_commits[state.merge_index:]:
with state.push(["gh_repo", "sync_tree"]):
state.commit = commit
pr_merger = PRMergeRunner(self.logger, state)
rv = pr_merger.run()
if rv is not None:
return rv
state.merge_index += 1
class UpdateLastSyncData(Step):
"""Update the gecko commit at which we last performed a sync with upstream."""
provides = []
def create(self, state):
self.logger.info("Updating last sync commit")
data = {"local": state.local_tree.rev,
"upstream": state.sync_tree.rev}
with open(state.sync_data_path, "w") as f:
for key, value in iteritems(data):
f.write("%s: %s\n" % (key, value))
# This gets added to the patch later on
class MergeLocalBranch(Step):
"""Create a local branch pointing at the commit to upstream"""
provides = ["local_branch"]
def create(self, state):
branch_prefix = "sync_%s" % state.commit.sha1
local_branch = state.sync_tree.unique_branch_name(branch_prefix)
state.sync_tree.create_branch(local_branch, state.commit)
state.local_branch = local_branch
class MergeRemoteBranch(Step):
"""Get an unused remote branch name to use for the PR"""
provides = ["remote_branch"]
def create(self, state):
remote_branch = "sync_%s" % state.commit.sha1
branches = [ref[len("refs/heads/"):] for sha1, ref in
state.sync_tree.list_remote(state.gh_repo.url)
if ref.startswith("refs/heads")]
state.remote_branch = get_unique_name(branches, remote_branch)
class PushUpstream(Step):
"""Push local branch to remote"""
def create(self, state):
self.logger.info("Pushing commit upstream")
state.sync_tree.push(state.gh_repo.url,
state.local_branch,
state.remote_branch)
class CreatePR(Step):
"""Create a PR for the remote branch"""
provides = ["pr"]
def create(self, state):
self.logger.info("Creating a PR")
commit = state.commit
state.pr = state.gh_repo.create_pr(commit.message.full_summary,
state.remote_branch,
"master",
commit.message.body if commit.message.body else "")
class PRAddComment(Step):
"""Add an issue comment indicating that the code has been reviewed already"""
def create(self, state):
state.pr.issue.add_comment("Code reviewed upstream.")
state.pr.issue.add_label("servo-export")
class MergePR(Step):
"""Merge the PR"""
def create(self, state):
self.logger.info("Merging PR")
state.pr.merge()
class PRDeleteBranch(Step):
"""Delete the remote branch"""
def create(self, state):
self.logger.info("Deleting remote branch")
state.sync_tree.push(state.gh_repo.url, "", state.remote_branch)
class SyncToUpstreamRunner(StepRunner):
"""Runner for syncing local changes to upstream"""
steps = [GetLastSyncData,
UpdateCheckout,
CheckoutBranch,
GetBaseCommit,
LoadCommits,
SelectCommits,
MovePatches,
RebaseCommits,
CheckRebase,
MergeUpstream,
UpdateLastSyncData]
class PRMergeRunner(StepRunner):
"""(Sub)Runner for creating and merging a PR"""
steps = [
MergeLocalBranch,
MergeRemoteBranch,
PushUpstream,
CreatePR,
PRAddComment,
MergePR,
PRDeleteBranch,
]
| mpl-2.0 | 58,273,125,150,154,430 | 34.573265 | 130 | 0.581731 | false |
wonjohnchoi/EE122-Project3 | pox/messenger/mux.py | 1 | 4521 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Sometimes you'd like to be able to communicate with multiple messenger
services over the same connection. You can use the mux messenger service
to do this.
If you send a "hello":"mux" message, the muxer will claim that connection.
Subsequent messages should include "_mux":<Key> pairs. For each unique
Key, the muxer will create a new virtual connection -- subsequent messages
with the same "_mux":<Key> will be sent down that virtual connection, and
messages from that service will have the key tagged on. Note this means
that messages to and from services you'd like muxed must be JSON objects
(dictionaries). If this is a problem, let me know, because the muxer could
be extended.
An example:
(Assume we have created a MessengerExample("foo"))
-> {"hello":"mux"}
-> {"_mux":"logger1", "hello":"log"}
-> {"_mux":"logger2", "hello":"log"}
-> {"_mux":"logger1", "level":"ERROR"}
-> {"_mux":"bar", "hello":"foo"}
-> {"_mux":"bar", "echo":"hello world"}
<- {"_mux":"bar", "echo":"hello world"}
In this case, we have created two loggers, configured one of them
independent of the other, sent an echo request to a MessengerExample
object, and recieved the result.
"""
from pox.core import core
from pox.messenger.messenger import *
log = pox.core.getLogger()
class MuxConnection (MessengerConnection):
def __init__ (self, source, channelName, con):
MessengerConnection.__init__(self, source, ID=str(id(self)))
self.channelName = channelName
self.con = con
claimed = False
e = core.messenger.raiseEventNoErrors(ConnectionStarted, self)
if e is not None:
claimed = e._claimed
if not claimed:
# Unclaimed events get forwarded to here too
self.addListener(MessageReceived, self._defaultMessageReceived, priority=-1) # Low priority
self._newlines = False
def send (self, whatever, **kw):
whatever = dict(whatever)
whatever['_mux'] = self.channelName
MessengerConnection.send(self, whatever, **kw)
def sendRaw (self, data):
self.con.sendRaw(data)
class MuxSource (EventMixin):
def __init__ (self, con):
self.listenTo(con)
self.channels = {}
def _forget (self, connection):
if connection in self.channels:
del self.channels[connection.channelName]
else:
log.warn("Tried to forget a channel I didn't know")
def _handle_MessageReceived (self, event, msg):
if event.con.isReadable():
r = event.con.read()
if type(r) is dict:
channelName = r.get("_mux", None)
if channelName is not None:
del r['_mux']
if channelName not in self.channels:
# New channel
channel = MuxConnection(self, channelName, event.con)
self.channels[channelName] = channel
else:
channel = self.channels[channelName]
channel._recv_msg(r)
elif r.get("_mux_bye",False):
event.con.close()
else:
log.warn("Message to demuxer didn't specify a channel or valid command")
else:
log.warn("Demuxer only handlers dictionaries")
else:
self._closeAll()
def _handle_ConnectionClosed (self, event):
self._closeAll()
def _closeAll (self):
channels = self.channels.values()
for connection in channels:
connection._close()
class MuxHub (object):
"""
"""
def __init__ (self):
core.messenger.addListener(MessageReceived, self._handle_global_MessageReceived)#, weak=True)
def _handle_global_MessageReceived (self, event, msg):
try:
if msg['hello'] == 'mux':
# It's for me!
event.claim()
event.con.read()
m = MuxSource(event.con)
print self.__class__.__name__, "- started conversation with", event.con
except:
pass
def launch ():
# core.register("demux", MessengerHub())
global hub
hub = MuxHub()
| gpl-3.0 | 4,525,929,168,765,370,400 | 30.615385 | 97 | 0.669542 | false |
defionscode/ansible | test/units/utils/test_encrypt.py | 51 | 5692 | # (c) 2018, Matthias Fuchs <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import pytest
import sys
from ansible.errors import AnsibleError, AnsibleFilterError
from ansible.plugins.filter.core import get_encrypted_password
from ansible.utils import encrypt
class passlib_off(object):
def __init__(self):
self.orig = encrypt.PASSLIB_AVAILABLE
def __enter__(self):
encrypt.PASSLIB_AVAILABLE = False
return self
def __exit__(self, exception_type, exception_value, traceback):
encrypt.PASSLIB_AVAILABLE = self.orig
def assert_hash(expected, secret, algorithm, **settings):
assert encrypt.CryptHash(algorithm).hash(secret, **settings) == expected
if encrypt.PASSLIB_AVAILABLE:
assert encrypt.passlib_or_crypt(secret, algorithm, **settings) == expected
assert encrypt.PasslibHash(algorithm).hash(secret, **settings) == expected
else:
with pytest.raises(AnsibleFilterError):
encrypt.PasslibHash(algorithm).hash(secret, **settings)
def test_encrypt_with_rounds():
assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7",
secret="123", algorithm="sha256_crypt", salt="12345678", rounds=5000)
assert_hash("$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/",
secret="123", algorithm="sha256_crypt", salt="12345678", rounds=10000)
assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.",
secret="123", algorithm="sha512_crypt", salt="12345678", rounds=5000)
def test_encrypt_default_rounds():
assert_hash("$1$12345678$tRy4cXc3kmcfRZVj4iFXr/",
secret="123", algorithm="md5_crypt", salt="12345678")
assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7",
secret="123", algorithm="sha256_crypt", salt="12345678")
assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.",
secret="123", algorithm="sha512_crypt", salt="12345678")
assert encrypt.CryptHash("md5_crypt").hash("123")
def test_password_hash_filter_no_passlib():
with passlib_off():
assert not encrypt.PASSLIB_AVAILABLE
assert get_encrypted_password("123", "md5", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/"
with pytest.raises(AnsibleFilterError):
get_encrypted_password("123", "crypt16", salt="12")
def test_password_hash_filter_passlib():
if not encrypt.PASSLIB_AVAILABLE:
pytest.skip("passlib not available")
with pytest.raises(AnsibleFilterError):
get_encrypted_password("123", "sha257", salt="12345678")
# Uses 5000 rounds by default for sha256 matching crypt behaviour
assert get_encrypted_password("123", "sha256", salt="12345678") == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7"
assert get_encrypted_password("123", "sha256", salt="12345678", rounds=5000) == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7"
assert (get_encrypted_password("123", "sha256", salt="12345678", rounds=10000) ==
"$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/")
assert (get_encrypted_password("123", "sha512", salt="12345678", rounds=6000) ==
"$6$rounds=6000$12345678$l/fC67BdJwZrJ7qneKGP1b6PcatfBr0dI7W6JLBrsv8P1wnv/0pu4WJsWq5p6WiXgZ2gt9Aoir3MeORJxg4.Z/")
assert (get_encrypted_password("123", "sha512", salt="12345678", rounds=5000) ==
"$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.")
assert get_encrypted_password("123", "crypt16", salt="12") == "12pELHK2ME3McUFlHxel6uMM"
# Try algorithm that uses a raw salt
assert get_encrypted_password("123", "pbkdf2_sha256")
def test_do_encrypt_no_passlib():
with passlib_off():
assert not encrypt.PASSLIB_AVAILABLE
assert encrypt.do_encrypt("123", "md5_crypt", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/"
with pytest.raises(AnsibleError):
encrypt.do_encrypt("123", "crypt16", salt="12")
def test_do_encrypt_passlib():
if not encrypt.PASSLIB_AVAILABLE:
pytest.skip("passlib not available")
with pytest.raises(AnsibleError):
encrypt.do_encrypt("123", "sha257_crypt", salt="12345678")
# Uses 5000 rounds by default for sha256 matching crypt behaviour.
assert encrypt.do_encrypt("123", "sha256_crypt", salt="12345678") == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7"
assert encrypt.do_encrypt("123", "md5_crypt", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/"
assert encrypt.do_encrypt("123", "crypt16", salt="12") == "12pELHK2ME3McUFlHxel6uMM"
def test_random_salt():
res = encrypt.random_salt()
expected_salt_candidate_chars = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./'
assert len(res) == 8
for res_char in res:
assert res_char in expected_salt_candidate_chars
| gpl-3.0 | -396,668,443,596,897,700 | 41.796992 | 141 | 0.712755 | false |
theatrus/eve-central.com | lib/evecentral/suggest.py | 1 | 1535 | # EVE-Central.com Codebase
# Copyright (C) 2006-2012 StackFoundry LLC and Yann Ramin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
def upload_suggest(db, region, rettype = "names"):
cur = db.cursor()
cur.execute("SELECT types.typename,types.typeid FROM types WHERE types.typename NOT LIKE '%% Blueprint' AND types.published = 1 AND types.marketgroup > 0 AND types.typeid NOT IN (SELECT DISTINCT current_market.typeid FROM current_market WHERE current_market.reportedtime > (NOW() - interval '1 hour') AND current_market.regionid = %s ) ORDER BY RANDOM() LIMIT 20", [region])
l = []
r = cur.fetchone()
while r:
if rettype == "names":
l.append(r[0])
elif rettype == "ids":
l.append(r[1])
else:
tup = (r[0], r[1])
l.append(tup)
r = cur.fetchone()
return l
| agpl-3.0 | -3,251,484,328,430,344,000 | 38.358974 | 379 | 0.665147 | false |