# intgr/django: tests/admin_utils/test_logentry.py
import json
from datetime import datetime
from django.contrib.admin.models import ADDITION, CHANGE, DELETION, LogEntry
from django.contrib.admin.utils import quote
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils import translation
from django.utils.encoding import force_bytes
from django.utils.html import escape
from .models import Article, ArticleProxy, Site
@override_settings(ROOT_URLCONF='admin_utils.urls')
class LogEntryTests(TestCase):
def setUp(self):
self.user = User.objects.create_superuser(username='super', password='secret', email='[email protected]')
self.site = Site.objects.create(domain='example.org')
self.a1 = Article.objects.create(
site=self.site,
title="Title",
created=datetime(2008, 3, 12, 11, 54),
)
content_type_pk = ContentType.objects.get_for_model(Article).pk
LogEntry.objects.log_action(
self.user.pk, content_type_pk, self.a1.pk, repr(self.a1), CHANGE,
change_message='Changed something'
)
self.client.force_login(self.user)
def test_logentry_save(self):
"""
LogEntry.action_time is a timestamp of the date when the entry was
created. It shouldn't be updated on a subsequent save().
"""
logentry = LogEntry.objects.get(content_type__model__iexact="article")
action_time = logentry.action_time
logentry.save()
self.assertEqual(logentry.action_time, action_time)
def test_logentry_change_message(self):
"""
LogEntry.change_message is stored as a dumped JSON structure to be able
to get the message dynamically translated at display time.
"""
post_data = {
'site': self.site.pk, 'title': 'Changed', 'hist': 'Some content',
'created_0': '2008-03-12', 'created_1': '11:54',
}
change_url = reverse('admin:admin_utils_article_change', args=[quote(self.a1.pk)])
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_article_changelist'))
logentry = LogEntry.objects.filter(content_type__model__iexact='article').latest('id')
self.assertEqual(logentry.get_change_message(), 'Changed title and hist.')
with translation.override('fr'):
self.assertEqual(logentry.get_change_message(), 'Title et hist modifié(s).')
add_url = reverse('admin:admin_utils_article_add')
post_data['title'] = 'New'
response = self.client.post(add_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_article_changelist'))
logentry = LogEntry.objects.filter(content_type__model__iexact='article').latest('id')
self.assertEqual(logentry.get_change_message(), 'Added.')
with translation.override('fr'):
self.assertEqual(logentry.get_change_message(), 'Ajout.')
@override_settings(USE_L10N=True)
def test_logentry_change_message_localized_datetime_input(self):
"""
Localized date/time inputs shouldn't affect changed form data detection.
"""
post_data = {
'site': self.site.pk, 'title': 'Changed', 'hist': 'Some content',
'created_0': '12/03/2008', 'created_1': '11:54',
}
with translation.override('fr'):
change_url = reverse('admin:admin_utils_article_change', args=[quote(self.a1.pk)])
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_article_changelist'))
logentry = LogEntry.objects.filter(content_type__model__iexact='article').latest('id')
self.assertEqual(logentry.get_change_message(), 'Changed title and hist.')
def test_logentry_change_message_formsets(self):
"""
All messages for changed formsets are logged in a change message.
"""
a2 = Article.objects.create(
site=self.site,
title="Title second article",
created=datetime(2012, 3, 18, 11, 54),
)
post_data = {
'domain': 'example.com', # domain changed
'admin_articles-TOTAL_FORMS': '5',
'admin_articles-INITIAL_FORMS': '2',
'admin_articles-MIN_NUM_FORMS': '0',
'admin_articles-MAX_NUM_FORMS': '1000',
# Changed title for 1st article
'admin_articles-0-id': str(self.a1.pk),
'admin_articles-0-site': str(self.site.pk),
'admin_articles-0-title': 'Changed Title',
# Second article is deleted
'admin_articles-1-id': str(a2.pk),
'admin_articles-1-site': str(self.site.pk),
'admin_articles-1-title': 'Title second article',
'admin_articles-1-DELETE': 'on',
# A new article is added
'admin_articles-2-site': str(self.site.pk),
'admin_articles-2-title': 'Added article',
}
change_url = reverse('admin:admin_utils_site_change', args=[quote(self.site.pk)])
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_site_changelist'))
self.assertQuerysetEqual(Article.objects.filter(pk=a2.pk), [])
logentry = LogEntry.objects.filter(content_type__model__iexact='site').latest('action_time')
self.assertEqual(
json.loads(logentry.change_message),
[
{"changed": {"fields": ["domain"]}},
{"added": {"object": "Article object", "name": "article"}},
{"changed": {"fields": ["title"], "object": "Article object", "name": "article"}},
{"deleted": {"object": "Article object", "name": "article"}},
]
)
self.assertEqual(
logentry.get_change_message(),
'Changed domain. Added article "Article object". '
'Changed title for article "Article object". Deleted article "Article object".'
)
with translation.override('fr'):
self.assertEqual(
logentry.get_change_message(),
"Domain modifié(s). Article « Article object » ajouté. "
"Title modifié(s) pour l'objet article « Article object ». Article « Article object » supprimé."
)
def test_logentry_get_edited_object(self):
"""
LogEntry.get_edited_object() returns the edited object of a LogEntry
object.
"""
logentry = LogEntry.objects.get(content_type__model__iexact="article")
edited_obj = logentry.get_edited_object()
self.assertEqual(logentry.object_id, str(edited_obj.pk))
def test_logentry_get_admin_url(self):
"""
LogEntry.get_admin_url returns a URL to edit the entry's object or
None for nonexistent (possibly deleted) models.
"""
logentry = LogEntry.objects.get(content_type__model__iexact='article')
expected_url = reverse('admin:admin_utils_article_change', args=(quote(self.a1.pk),))
self.assertEqual(logentry.get_admin_url(), expected_url)
self.assertIn('article/%d/change/' % self.a1.pk, logentry.get_admin_url())
logentry.content_type.model = "nonexistent"
self.assertIsNone(logentry.get_admin_url())
def test_logentry_unicode(self):
log_entry = LogEntry()
log_entry.action_flag = ADDITION
self.assertTrue(str(log_entry).startswith('Added '))
log_entry.action_flag = CHANGE
self.assertTrue(str(log_entry).startswith('Changed '))
log_entry.action_flag = DELETION
self.assertTrue(str(log_entry).startswith('Deleted '))
# Make sure custom action_flags work
log_entry.action_flag = 4
self.assertEqual(str(log_entry), 'LogEntry Object')
def test_log_action(self):
content_type_pk = ContentType.objects.get_for_model(Article).pk
log_entry = LogEntry.objects.log_action(
self.user.pk, content_type_pk, self.a1.pk, repr(self.a1), CHANGE,
change_message='Changed something else',
)
self.assertEqual(log_entry, LogEntry.objects.latest('id'))
def test_recentactions_without_content_type(self):
"""
If a LogEntry is missing its content_type, the content type name isn't
displayed in the span tag under the hyperlink.
"""
response = self.client.get(reverse('admin:index'))
link = reverse('admin:admin_utils_article_change', args=(quote(self.a1.pk),))
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(repr(self.a1)))
self.assertContains(response, should_contain)
should_contain = "Article"
self.assertContains(response, should_contain)
logentry = LogEntry.objects.get(content_type__model__iexact='article')
# If the log entry doesn't have a content type it should still be
# possible to view the Recent Actions part (#10275).
logentry.content_type = None
logentry.save()
counted_presence_before = response.content.count(force_bytes(should_contain))
response = self.client.get(reverse('admin:index'))
counted_presence_after = response.content.count(force_bytes(should_contain))
self.assertEqual(counted_presence_before - 1, counted_presence_after)
def test_proxy_model_content_type_is_used_for_log_entries(self):
"""
Log entries for proxy models should have the proxy model's contenttype
(#21084).
"""
proxy_content_type = ContentType.objects.get_for_model(ArticleProxy, for_concrete_model=False)
post_data = {
'site': self.site.pk, 'title': "Foo", 'hist': "Bar",
'created_0': '2015-12-25', 'created_1': '00:00',
}
changelist_url = reverse('admin:admin_utils_articleproxy_changelist')
# add
proxy_add_url = reverse('admin:admin_utils_articleproxy_add')
response = self.client.post(proxy_add_url, post_data)
self.assertRedirects(response, changelist_url)
proxy_addition_log = LogEntry.objects.latest('id')
self.assertEqual(proxy_addition_log.action_flag, ADDITION)
self.assertEqual(proxy_addition_log.content_type, proxy_content_type)
# change
article_id = proxy_addition_log.object_id
proxy_change_url = reverse('admin:admin_utils_articleproxy_change', args=(article_id,))
post_data['title'] = 'New'
response = self.client.post(proxy_change_url, post_data)
self.assertRedirects(response, changelist_url)
proxy_change_log = LogEntry.objects.latest('id')
self.assertEqual(proxy_change_log.action_flag, CHANGE)
self.assertEqual(proxy_change_log.content_type, proxy_content_type)
# delete
proxy_delete_url = reverse('admin:admin_utils_articleproxy_delete', args=(article_id,))
response = self.client.post(proxy_delete_url, {'post': 'yes'})
self.assertRedirects(response, changelist_url)
proxy_delete_log = LogEntry.objects.latest('id')
self.assertEqual(proxy_delete_log.action_flag, DELETION)
self.assertEqual(proxy_delete_log.content_type, proxy_content_type)
# License (tests/admin_utils/test_logentry.py): bsd-3-clause

# lahwaacz/qutebrowser: qutebrowser/config/configdata.py
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Configuration data for config.py.
Module attributes:
FIRST_COMMENT: The initial comment header to place in the config.
SECTION_DESC: A dictionary with descriptions for sections.
DATA: A global read-only copy of the default config, an OrderedDict of
sections.
"""
import sys
import re
import collections
from qutebrowser.config import configtypes as typ
from qutebrowser.config import sections as sect
from qutebrowser.config.value import SettingValue
from qutebrowser.utils.qtutils import MAXVALS
from qutebrowser.utils import usertypes, qtutils
FIRST_COMMENT = r"""
# vim: ft=dosini
# Configfile for qutebrowser.
#
# This configfile is parsed by python's configparser in extended
# interpolation mode. The format is very INI-like, so there are
# categories like [general] with "key = value"-pairs.
#
# Note that you shouldn't add your own comments, as this file is
# regenerated every time the config is saved.
#
# Interpolation looks like ${value} or ${section:value} and will be
# replaced by the respective value.
#
# Some settings will expand environment variables. Note that, since
# interpolation is run first, you will need to escape the $ char as
# described below.
#
# This is the default config, so if you want to remove anything from
# here (as opposed to change/add), for example a key binding, set it to
# an empty value.
#
# You will need to escape the following values:
# - # at the start of the line (at the first position of the key) (\#)
# - $ in a value ($$)
"""
SECTION_DESC = {
'general': "General/miscellaneous options.",
'ui': "General options related to the user interface.",
'input': "Options related to input modes.",
'network': "Settings related to the network.",
'completion': "Options related to completion and command history.",
'tabs': "Configuration of the tab bar.",
'storage': "Settings related to cache and storage.",
'content': "Loaded plugins/scripts and allowed actions.",
'hints': "Hinting settings.",
'searchengines': (
"Definitions of search engines which can be used via the address "
"bar.\n"
"The searchengine named `DEFAULT` is used when "
"`general -> auto-search` is true and something else than a URL was "
"entered to be opened. Other search engines can be used by prepending "
"the search engine name to the search term, e.g. "
"`:open google qutebrowser`. The string `{}` will be replaced by the "
"search term, use `{{` and `}}` for literal `{`/`}` signs."),
'aliases': (
"Aliases for commands.\n"
"By default, no aliases are defined. Example which adds a new command "
"`:qtb` to open qutebrowsers website:\n\n"
"`qtb = open https://www.qutebrowser.org/`"),
'colors': (
"Colors used in the UI.\n"
"A value can be in one of the following format:\n\n"
" * `#RGB`/`#RRGGBB`/`#RRRGGGBBB`/`#RRRRGGGGBBBB`\n"
" * An SVG color name as specified in http://www.w3.org/TR/SVG/"
"types.html#ColorKeywords[the W3C specification].\n"
" * transparent (no color)\n"
" * `rgb(r, g, b)` / `rgba(r, g, b, a)` (values 0-255 or "
"percentages)\n"
" * `hsv(h, s, v)` / `hsva(h, s, v, a)` (values 0-255, hue 0-359)\n"
" * A gradient as explained in http://doc.qt.io/qt-5/"
"stylesheet-reference.html#list-of-property-types[the Qt "
"documentation] under ``Gradient''.\n\n"
"A *.system value determines the color system to use for color "
"interpolation between similarly-named *.start and *.stop entries, "
"regardless of how they are defined in the options. "
"Valid values are 'rgb', 'hsv', and 'hsl'.\n\n"
"The `hints.*` values are a special case as they're real CSS "
"colors, not Qt-CSS colors. There, for a gradient, you need to use "
"`-webkit-gradient`, see https://www.webkit.org/blog/175/introducing-"
"css-gradients/[the WebKit documentation]."),
'fonts': (
"Fonts used for the UI, with optional style/weight/size.\n\n"
" * Style: `normal`/`italic`/`oblique`\n"
" * Weight: `normal`, `bold`, `100`..`900`\n"
" * Size: _number_ `px`/`pt`"),
}
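# Hedged illustration of the searchengine placeholder rules described in
# SECTION_DESC above; the engine name "google" and its URL are made up for
# the example and are not shipped defaults:
#
#   [searchengines]
#   google = https://www.google.com/search?q={}
#
# With such an entry, `:open google qutebrowser` would search for
# "qutebrowser"; literal braces in a URL would be written as {{ and }}.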
DEFAULT_FONT_SIZE = '10pt' if sys.platform == 'darwin' else '8pt'
def data(readonly=False):
"""Get the default config data.
Return:
A {name: section} OrderedDict.
"""
return collections.OrderedDict([
('general', sect.KeyValue(
('ignore-case',
SettingValue(typ.IgnoreCase(), 'smart'),
"Whether to find text on a page case-insensitively."),
('startpage',
SettingValue(typ.List(typ.String()),
'https://start.duckduckgo.com'),
"The default page(s) to open at the start, separated by commas."),
('yank-ignored-url-parameters',
SettingValue(typ.List(typ.String()),
'ref,utm_source,utm_medium,utm_campaign,utm_term,'
'utm_content'),
"The URL parameters to strip with :yank url, separated by "
"commas."),
('default-open-dispatcher',
SettingValue(typ.String(none_ok=True), ''),
"The default program used to open downloads. Set to an empty "
"string to use the default internal handler.\n\n"
"Any {} in the string will be expanded to the filename, else "
"the filename will be appended."),
('default-page',
SettingValue(typ.FuzzyUrl(), '${startpage}'),
"The page to open if :open -t/-b/-w is used without URL. Use "
"`about:blank` for a blank page."),
('auto-search',
SettingValue(typ.AutoSearch(), 'naive'),
"Whether to start a search when something else than a URL is "
"entered."),
('auto-save-config',
SettingValue(typ.Bool(), 'true'),
"Whether to save the config automatically on quit."),
('auto-save-interval',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '15000'),
"How often (in milliseconds) to auto-save config/cookies/etc."),
('editor',
SettingValue(typ.ShellCommand(placeholder=True), 'gvim -f "{}"'),
"The editor (and arguments) to use for the `open-editor` "
"command.\n\n"
"The arguments get split like in a shell, so you can use `\"` or "
"`'` to quote them.\n"
"`{}` gets replaced by the filename of the file to be edited."),
('editor-encoding',
SettingValue(typ.Encoding(), 'utf-8'),
"Encoding to use for editor."),
('private-browsing',
SettingValue(typ.Bool(), 'false'),
"Open new windows in private browsing mode which does not record "
"visited pages."),
('developer-extras',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Enable extra tools for Web developers.\n\n"
"This needs to be enabled for `:inspector` to work and also adds "
"an _Inspect_ entry to the context menu. For QtWebEngine, see "
"'qutebrowser --help' instead."),
('print-element-backgrounds',
SettingValue(typ.Bool(), 'true',
backends=(
None if qtutils.version_check('5.8', strict=True)
else [usertypes.Backend.QtWebKit])),
"Whether the background color and images are also drawn when the "
"page is printed.\n"
"This setting only works with Qt 5.8 or newer when using the "
"QtWebEngine backend."),
('xss-auditing',
SettingValue(typ.Bool(), 'false'),
"Whether load requests should be monitored for cross-site "
"scripting attempts.\n\n"
"Suspicious scripts will be blocked and reported in the "
"inspector's JavaScript console. Enabling this feature might "
"have an impact on performance."),
('default-encoding',
SettingValue(typ.String(), 'iso-8859-1'),
"Default encoding to use for websites.\n\n"
"The encoding must be a string describing an encoding such as "
"_utf-8_, _iso-8859-1_, etc."),
('new-instance-open-target',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('tab', "Open a new tab in the existing "
"window and activate the window."),
('tab-bg', "Open a new background tab in the "
"existing window and activate the "
"window."),
('tab-silent', "Open a new tab in the existing "
"window without activating "
"the window."),
('tab-bg-silent', "Open a new background tab "
"in the existing window "
"without activating the "
"window."),
('window', "Open in a new window.")
)), 'tab'),
"How to open links in an existing instance if a new one is "
"launched."),
('new-instance-open-target.window',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('first-opened', "Open new tabs in the first (oldest) "
"opened window."),
('last-opened', "Open new tabs in the last (newest) "
"opened window."),
('last-focused', "Open new tabs in the most recently "
"focused window."),
('last-visible', "Open new tabs in the most recently "
"visible window.")
)), 'last-focused'),
"Which window to choose when opening links as new tabs."),
('log-javascript-console',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('none', "Don't log messages."),
('debug', "Log messages with debug level."),
('info', "Log messages with info level.")
)), 'debug'),
"How to log javascript console messages."),
('save-session',
SettingValue(typ.Bool(), 'false'),
"Whether to always save the open pages."),
('session-default-name',
SettingValue(typ.SessionName(none_ok=True), ''),
"The name of the session to save by default, or empty for the "
"last loaded session."),
('url-incdec-segments',
SettingValue(
typ.FlagList(valid_values=typ.ValidValues(
'host', 'path', 'query', 'anchor')),
'path,query'),
"The URL segments where `:navigate increment/decrement` will "
"search for a number."),
readonly=readonly
)),
('ui', sect.KeyValue(
('history-session-interval',
SettingValue(typ.Int(), '30'),
"The maximum time in minutes between two history items for them "
"to be considered being from the same session. Use -1 to "
"disable separation."),
('zoom-levels',
SettingValue(typ.List(typ.Perc(minval=0)),
'25%,33%,50%,67%,75%,90%,100%,110%,125%,150%,175%,'
'200%,250%,300%,400%,500%'),
"The available zoom levels, separated by commas."),
('default-zoom',
SettingValue(typ.Perc(), '100%'),
"The default zoom level."),
('downloads-position',
SettingValue(typ.VerticalPosition(), 'top'),
"Where to show the downloaded files."),
('status-position',
SettingValue(typ.VerticalPosition(), 'bottom'),
"The position of the status bar."),
('message-timeout',
SettingValue(typ.Int(minval=0), '2000'),
"Time (in ms) to show messages in the statusbar for.\n"
"Set to 0 to never clear messages."),
('message-unfocused',
SettingValue(typ.Bool(), 'false'),
"Whether to show messages in unfocused windows."),
('confirm-quit',
SettingValue(typ.ConfirmQuit(), 'never'),
"Whether to confirm quitting the application."),
('zoom-text-only',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Whether the zoom factor on a frame applies only to the text or "
"to all content."),
('frame-flattening',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Whether to expand each subframe to its contents.\n\n"
"This will flatten all the frames to become one scrollable "
"page."),
('user-stylesheet',
SettingValue(typ.File(none_ok=True), ''),
"User stylesheet to use (absolute filename or filename relative "
"to the config directory). Will expand environment variables."),
('hide-scrollbar',
SettingValue(typ.Bool(), 'true'),
"Hide the main scrollbar."),
('smooth-scrolling',
SettingValue(typ.Bool(), 'false'),
"Whether to enable smooth scrolling for web pages. Note smooth "
"scrolling does not work with the :scroll-px command."),
('remove-finished-downloads',
SettingValue(typ.Int(minval=-1), '-1'),
"Number of milliseconds to wait before removing finished "
"downloads. Will not be removed if value is -1."),
('hide-statusbar',
SettingValue(typ.Bool(), 'false'),
"Whether to hide the statusbar unless a message is shown."),
('statusbar-padding',
SettingValue(typ.Padding(), '1,1,0,0'),
"Padding for statusbar (top, bottom, left, right)."),
('window-title-format',
SettingValue(typ.FormatString(fields=['perc', 'perc_raw', 'title',
'title_sep', 'id',
'scroll_pos', 'host',
'backend', 'private']),
'{perc}{title}{title_sep}qutebrowser'),
"The format to use for the window title. The following "
"placeholders are defined:\n\n"
"* `{perc}`: The percentage as a string like `[10%]`.\n"
"* `{perc_raw}`: The raw percentage, e.g. `10`\n"
"* `{title}`: The title of the current web page\n"
"* `{title_sep}`: The string ` - ` if a title is set, empty "
"otherwise.\n"
"* `{id}`: The internal window ID of this window.\n"
"* `{scroll_pos}`: The page scroll position.\n"
"* `{host}`: The host of the current web page.\n"
"* `{backend}`: Either 'webkit' or 'webengine'\n"
"* `{private}` : Indicates when private mode is enabled.\n"),
('modal-js-dialog',
SettingValue(typ.Bool(), 'false'),
"Use standard JavaScript modal dialog for alert() and confirm()"),
('hide-wayland-decoration',
SettingValue(typ.Bool(), 'false'),
"Hide the window decoration when using wayland "
"(requires restart)"),
('keyhint-blacklist',
SettingValue(typ.List(typ.String(), none_ok=True), ''),
"Keychains that shouldn't be shown in the keyhint dialog\n\n"
"Globs are supported, so ';*' will blacklist all keychains"
"starting with ';'. Use '*' to disable keyhints"),
('keyhint-delay',
SettingValue(typ.Int(minval=0), '500'),
"Time from pressing a key to seeing the keyhint dialog (ms)"),
('prompt-radius',
SettingValue(typ.Int(minval=0), '8'),
"The rounding radius for the edges of prompts."),
('prompt-filebrowser',
SettingValue(typ.Bool(), 'true'),
"Show a filebrowser in upload/download prompts."),
readonly=readonly
)),
('network', sect.KeyValue(
('do-not-track',
SettingValue(typ.Bool(), 'true'),
"Value to send in the `DNT` header."),
('accept-language',
SettingValue(typ.String(none_ok=True), 'en-US,en'),
"Value to send in the `accept-language` header."),
('referer-header',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('always', "Always send."),
('never', "Never send; this is not recommended,"
" as some sites may break."),
('same-domain', "Only send for the same domain."
" This will still protect your privacy, but"
" shouldn't break any sites.")
)), 'same-domain', backends=[usertypes.Backend.QtWebKit]),
"Send the Referer header"),
('user-agent',
SettingValue(typ.UserAgent(none_ok=True), ''),
"User agent to send. Empty to send the default."),
('proxy',
SettingValue(typ.Proxy(), 'system',
backends=(None if qtutils.version_check('5.8')
else [usertypes.Backend.QtWebKit])),
"The proxy to use.\n\n"
"In addition to the listed values, you can use a `socks://...` "
"or `http://...` URL.\n\n"
"This setting only works with Qt 5.8 or newer when using the "
"QtWebEngine backend."),
('proxy-dns-requests',
SettingValue(typ.Bool(), 'true',
backends=[usertypes.Backend.QtWebKit]),
"Whether to send DNS requests over the configured proxy."),
('ssl-strict',
SettingValue(typ.BoolAsk(), 'ask'),
"Whether to validate SSL handshakes."),
('dns-prefetch',
SettingValue(typ.Bool(), 'true',
backends=[usertypes.Backend.QtWebKit]),
"Whether to try to pre-fetch DNS entries to speed up browsing."),
('custom-headers',
SettingValue(typ.HeaderDict(none_ok=True), ''),
"Set custom headers for qutebrowser HTTP requests."),
('netrc-file',
SettingValue(typ.File(none_ok=True), ''),
"Set location of a netrc-file for HTTP authentication. If empty, "
"~/.netrc is used."),
readonly=readonly
)),
('completion', sect.KeyValue(
('show',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('always', "Whenever a completion is available."),
('auto', "Whenever a completion is requested."),
('never', "Never.")
)), 'always'),
"When to show the autocompletion window."),
('download-path-suggestion',
SettingValue(
typ.String(valid_values=typ.ValidValues(
('path', "Show only the download path."),
('filename', "Show only download filename."),
('both', "Show download path and filename."))),
'path'),
"What to display in the download filename input."),
('timestamp-format',
SettingValue(typ.TimestampTemplate(none_ok=True), '%Y-%m-%d'),
"How to format timestamps (e.g. for history)"),
('height',
SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1),
'50%'),
"The height of the completion, in px or as percentage of the "
"window."),
('cmd-history-max-items',
SettingValue(typ.Int(minval=-1), '100'),
"How many commands to save in the command history.\n\n"
"0: no history / -1: unlimited"),
('web-history-max-items',
SettingValue(typ.Int(minval=-1), '1000'),
"How many URLs to show in the web history.\n\n"
"0: no history / -1: unlimited"),
('quick-complete',
SettingValue(typ.Bool(), 'true'),
"Whether to move on to the next part when there's only one "
"possible completion left."),
('shrink',
SettingValue(typ.Bool(), 'false'),
"Whether to shrink the completion to be smaller than the "
"configured size if there are no scrollbars."),
('scrollbar-width',
SettingValue(typ.Int(minval=0), '12'),
"Width of the scrollbar in the completion window (in px)."),
('scrollbar-padding',
SettingValue(typ.Int(minval=0), '2'),
"Padding of scrollbar handle in completion window (in px)."),
readonly=readonly
)),
('input', sect.KeyValue(
('timeout',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '500'),
"Timeout (in milliseconds) for ambiguous key bindings.\n\n"
"If the current input forms both a complete match and a partial "
"match, the complete match will be executed after this time."),
('partial-timeout',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '5000'),
"Timeout (in milliseconds) for partially typed key bindings.\n\n"
"If the current input forms only partial matches, the keystring "
"will be cleared after this time."),
('insert-mode-on-plugins',
SettingValue(typ.Bool(), 'false'),
"Whether to switch to insert mode when clicking flash and other "
"plugins."),
('auto-leave-insert-mode',
SettingValue(typ.Bool(), 'true'),
"Whether to leave insert mode if a non-editable element is "
"clicked."),
('auto-insert-mode',
SettingValue(typ.Bool(), 'false'),
"Whether to automatically enter insert mode if an editable "
"element is focused after page load."),
('forward-unbound-keys',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('all', "Forward all unbound keys."),
('auto', "Forward unbound non-alphanumeric "
"keys."),
('none', "Don't forward any keys.")
)), 'auto'),
"Whether to forward unbound keys to the webview in normal mode."),
('spatial-navigation',
SettingValue(typ.Bool(), 'false'),
"Enables or disables the Spatial Navigation feature.\n\n"
"Spatial navigation consists in the ability to navigate between "
"focusable elements in a Web page, such as hyperlinks and form "
"controls, by using Left, Right, Up and Down arrow keys. For "
"example, if a user presses the Right key, heuristics determine "
"whether there is an element he might be trying to reach towards "
"the right and which element he probably wants."),
('links-included-in-focus-chain',
SettingValue(typ.Bool(), 'true'),
"Whether hyperlinks should be included in the keyboard focus "
"chain."),
('rocker-gestures',
SettingValue(typ.Bool(), 'false'),
"Whether to enable Opera-like mouse rocker gestures. This "
"disables the context menu."),
('mouse-zoom-divider',
SettingValue(typ.Int(minval=0), '512'),
"How much to divide the mouse wheel movements to translate them "
"into zoom increments."),
readonly=readonly
)),
('tabs', sect.KeyValue(
('background-tabs',
SettingValue(typ.Bool(), 'false'),
"Whether to open new tabs (middleclick/ctrl+click) in "
"background."),
('select-on-remove',
SettingValue(typ.SelectOnRemove(), 'next'),
"Which tab to select when the focused tab is removed."),
('new-tab-position',
SettingValue(typ.NewTabPosition(), 'next'),
"How new tabs are positioned."),
('new-tab-position-explicit',
SettingValue(typ.NewTabPosition(), 'last'),
"How new tabs opened explicitly are positioned."),
('last-close',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('ignore', "Don't do anything."),
('blank', "Load a blank page."),
('startpage', "Load the start page."),
('default-page', "Load the default page."),
('close', "Close the window.")
)), 'ignore'),
"Behavior when the last tab is closed."),
('show',
SettingValue(
typ.String(valid_values=typ.ValidValues(
('always', "Always show the tab bar."),
('never', "Always hide the tab bar."),
('multiple', "Hide the tab bar if only one tab "
"is open."),
('switching', "Show the tab bar when switching "
"tabs.")
)), 'always'),
"When to show the tab bar"),
('show-switching-delay',
SettingValue(typ.Int(), '800'),
"Time to show the tab bar before hiding it when tabs->show is "
"set to 'switching'."),
('wrap',
SettingValue(typ.Bool(), 'true'),
"Whether to wrap when changing tabs."),
('movable',
SettingValue(typ.Bool(), 'true'),
"Whether tabs should be movable."),
('close-mouse-button',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('right', "Close tabs on right-click."),
('middle', "Close tabs on middle-click."),
('none', "Don't close tabs using the mouse.")
)), 'middle'),
"On which mouse button to close tabs."),
('position',
SettingValue(typ.Position(), 'top'),
"The position of the tab bar."),
('show-favicons',
SettingValue(typ.Bool(), 'true'),
"Whether to show favicons in the tab bar."),
('favicon-scale',
SettingValue(typ.Float(minval=0.0), '1.0'),
"Scale for favicons in the tab bar. The tab size is unchanged, "
"so big favicons also require extra `tabs->padding`."),
('width',
SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1),
'20%'),
"The width of the tab bar if it's vertical, in px or as "
"percentage of the window."),
('pinned-width',
SettingValue(typ.Int(minval=10),
'43'),
"The width for pinned tabs with a horizontal tabbar, in px."),
('indicator-width',
SettingValue(typ.Int(minval=0), '3'),
"Width of the progress indicator (0 to disable)."),
('tabs-are-windows',
SettingValue(typ.Bool(), 'false'),
"Whether to open windows instead of tabs."),
('title-format',
SettingValue(typ.FormatString(
fields=['perc', 'perc_raw', 'title', 'title_sep', 'index',
'id', 'scroll_pos', 'host', 'private'], none_ok=True),
'{index}: {title}'),
"The format to use for the tab title. The following placeholders "
"are defined:\n\n"
"* `{perc}`: The percentage as a string like `[10%]`.\n"
"* `{perc_raw}`: The raw percentage, e.g. `10`\n"
"* `{title}`: The title of the current web page\n"
"* `{title_sep}`: The string ` - ` if a title is set, empty "
"otherwise.\n"
"* `{index}`: The index of this tab.\n"
"* `{id}`: The internal tab ID of this tab.\n"
"* `{scroll_pos}`: The page scroll position.\n"
"* `{host}`: The host of the current web page.\n"
"* `{backend}`: Either 'webkit' or 'webengine'\n"
"* `{private}` : Indicates when private mode is enabled.\n"),
('title-format-pinned',
SettingValue(typ.FormatString(
fields=['perc', 'perc_raw', 'title', 'title_sep', 'index',
'id', 'scroll_pos', 'host', 'private'], none_ok=True),
'{index}'),
"The format to use for the tab title for pinned tabs. "
"The same placeholders like for title-format are defined."),
('title-alignment',
SettingValue(typ.TextAlignment(), 'left'),
"Alignment of the text inside of tabs"),
('mousewheel-tab-switching',
SettingValue(typ.Bool(), 'true'),
"Switch between tabs using the mouse wheel."),
('padding',
SettingValue(typ.Padding(), '0,0,5,5'),
"Padding for tabs (top, bottom, left, right)."),
('indicator-padding',
SettingValue(typ.Padding(), '2,2,0,4'),
"Padding for indicators (top, bottom, left, right)."),
readonly=readonly
)),
('storage', sect.KeyValue(
('download-directory',
SettingValue(typ.Directory(none_ok=True), ''),
"The directory to save downloads to. An empty value selects a "
"sensible os-specific default. Will expand environment "
"variables."),
('prompt-download-directory',
SettingValue(typ.Bool(), 'true'),
"Whether to prompt the user for the download location.\n"
"If set to false, 'download-directory' will be used."),
('remember-download-directory',
SettingValue(typ.Bool(), 'true'),
"Whether to remember the last used download directory."),
# Defaults from QWebSettings::QWebSettings() in
# qtwebkit/Source/WebKit/qt/Api/qwebsettings.cpp
('maximum-pages-in-cache',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '0',
backends=[usertypes.Backend.QtWebKit]),
"The maximum number of pages to hold in the global memory page "
"cache.\n\n"
"The Page Cache allows for a nicer user experience when "
"navigating forth or back to pages in the forward/back history, "
"by pausing and resuming up to _n_ pages.\n\n"
"For more information about the feature, please refer to: "
"http://webkit.org/blog/427/webkit-page-cache-i-the-basics/"),
('offline-web-application-cache',
SettingValue(typ.Bool(), 'true',
backends=[usertypes.Backend.QtWebKit]),
"Whether support for the HTML 5 web application cache feature is "
"enabled.\n\n"
"An application cache acts like an HTTP cache in some sense. For "
"documents that use the application cache via JavaScript, the "
"loader engine will first ask the application cache for the "
"contents, before hitting the network.\n\n"
"The feature is described in details at: "
"http://dev.w3.org/html5/spec/Overview.html#appcache"),
('local-storage',
SettingValue(typ.Bool(), 'true'),
"Whether support for HTML 5 local storage and Web SQL is "
"enabled."),
('cache-size',
SettingValue(typ.Int(none_ok=True, minval=0,
maxval=MAXVALS['int64']), ''),
"Size of the HTTP network cache. Empty to use the default "
"value."),
readonly=readonly
)),
('content', sect.KeyValue(
('allow-images',
SettingValue(typ.Bool(), 'true'),
"Whether images are automatically loaded in web pages."),
('allow-javascript',
SettingValue(typ.Bool(), 'true'),
"Enables or disables the running of JavaScript programs."),
('allow-plugins',
SettingValue(typ.Bool(), 'false'),
"Enables or disables plugins in Web pages.\n\n"
'Qt plugins with a mimetype such as "application/x-qt-plugin" '
"are not affected by this setting."),
('webgl',
SettingValue(typ.Bool(), 'true'),
"Enables or disables WebGL."),
('hyperlink-auditing',
SettingValue(typ.Bool(), 'false'),
"Enable or disable hyperlink auditing (<a ping>)."),
('geolocation',
SettingValue(typ.BoolAsk(), 'ask'),
"Allow websites to request geolocations."),
('notifications',
SettingValue(typ.BoolAsk(), 'ask'),
"Allow websites to show notifications."),
('media-capture',
SettingValue(typ.BoolAsk(), 'ask',
backends=[usertypes.Backend.QtWebEngine]),
"Allow websites to record audio/video."),
('javascript-can-open-windows-automatically',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can open new windows without user "
"interaction."),
('javascript-can-close-windows',
SettingValue(typ.Bool(), 'false',
backends=[usertypes.Backend.QtWebKit]),
"Whether JavaScript programs can close windows."),
('javascript-can-access-clipboard',
SettingValue(typ.Bool(), 'false'),
"Whether JavaScript programs can read or write to the "
"clipboard.\nWith QtWebEngine, writing the clipboard as response "
"to a user interaction is always allowed."),
('ignore-javascript-prompt',
SettingValue(typ.Bool(), 'false'),
"Whether all javascript prompts should be ignored."),
('ignore-javascript-alert',
SettingValue(typ.Bool(), 'false'),
"Whether all javascript alerts should be ignored."),
('local-content-can-access-remote-urls',
SettingValue(typ.Bool(), 'false'),
"Whether locally loaded documents are allowed to access remote "
"urls."),
('local-content-can-access-file-urls',
SettingValue(typ.Bool(), 'true'),
"Whether locally loaded documents are allowed to access other "
"local urls."),
('cookies-accept',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('all', "Accept all cookies."),
('no-3rdparty', "Accept cookies from the same"
" origin only."),
('no-unknown-3rdparty', "Accept cookies from "
"the same origin only, unless a cookie is "
"already set for the domain."),
('never', "Don't accept cookies at all.")
)), 'no-3rdparty', backends=[usertypes.Backend.QtWebKit]),
"Control which cookies to accept."),
('cookies-store',
SettingValue(typ.Bool(), 'true'),
"Whether to store cookies. Note this option needs a restart with "
"QtWebEngine on Qt < 5.9."),
('host-block-lists',
SettingValue(
typ.List(typ.Url(), none_ok=True),
'https://www.malwaredomainlist.com/hostslist/hosts.txt,'
'http://someonewhocares.org/hosts/hosts,'
'http://winhelp2002.mvps.org/hosts.zip,'
'http://malwaredomains.lehigh.edu/files/justdomains.zip,'
'https://pgl.yoyo.org/adservers/serverlist.php?'
'hostformat=hosts&mimetype=plaintext'),
"List of URLs of lists which contain hosts to block.\n\n"
"The file can be in one of the following formats:\n\n"
"- An '/etc/hosts'-like file\n"
"- One host per line\n"
"- A zip-file of any of the above, with either only one file, or "
"a file named 'hosts' (with any extension)."),
('host-blocking-enabled',
SettingValue(typ.Bool(), 'true'),
"Whether host blocking is enabled."),
('host-blocking-whitelist',
SettingValue(typ.List(typ.String(), none_ok=True), 'piwik.org'),
"List of domains that should always be loaded, despite being "
"ad-blocked.\n\n"
"Domains may contain * and ? wildcards and are otherwise "
"required to exactly match the requested domain.\n\n"
"Local domains are always exempt from hostblocking."),
('enable-pdfjs', SettingValue(typ.Bool(), 'false'),
"Enable pdf.js to view PDF files in the browser.\n\n"
"Note that the files can still be downloaded by clicking"
" the download button in the pdf.js viewer."),
readonly=readonly
)),
('hints', sect.KeyValue(
('border',
SettingValue(typ.String(), '1px solid #E3BE23'),
"CSS border value for hints."),
('mode',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('number', "Use numeric hints. (In this mode you can "
"also type letters form the hinted element to filter "
"and reduce the number of elements that are hinted.)"),
('letter', "Use the chars in the hints -> "
"chars setting."),
('word', "Use hints words based on the html "
"elements and the extra words."),
)), 'letter'),
"Mode to use for hints."),
('chars',
SettingValue(typ.UniqueCharString(minlen=2, completions=[
('asdfghjkl', "Home row"),
('aoeuidnths', "Home row (Dvorak)"),
('abcdefghijklmnopqrstuvwxyz', "All letters"),
]), 'asdfghjkl'),
"Chars used for hint strings."),
('min-chars',
SettingValue(typ.Int(minval=1), '1'),
"Minimum number of chars used for hint strings."),
('scatter',
SettingValue(typ.Bool(), 'true'),
"Whether to scatter hint key chains (like Vimium) or not (like "
"dwb). Ignored for number hints."),
('uppercase',
SettingValue(typ.Bool(), 'false'),
"Make chars in hint strings uppercase."),
('dictionary',
SettingValue(typ.File(required=False), '/usr/share/dict/words'),
"The dictionary file to be used by the word hints."),
('auto-follow',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('always', "Auto-follow whenever there is only a single "
"hint on a page."),
('unique-match', "Auto-follow whenever there is a unique "
"non-empty match in either the hint string (word mode) "
"or filter (number mode)."),
('full-match', "Follow the hint when the user typed the "
"whole hint (letter, word or number mode) or the "
"element's text (only in number mode)."),
('never', "The user will always need to press Enter to "
"follow a hint."),
)), 'unique-match'),
"Controls when a hint can be automatically followed without the "
"user pressing Enter."),
('auto-follow-timeout',
SettingValue(typ.Int(), '0'),
"A timeout (in milliseconds) to inhibit normal-mode key bindings "
"after a successful auto-follow."),
('next-regexes',
SettingValue(typ.List(typ.Regex(flags=re.IGNORECASE)),
r'\bnext\b,\bmore\b,\bnewer\b,\b[>→≫]\b,\b(>>|»)\b,'
r'\bcontinue\b'),
"A comma-separated list of regexes to use for 'next' links."),
('prev-regexes',
SettingValue(typ.List(typ.Regex(flags=re.IGNORECASE)),
r'\bprev(ious)?\b,\bback\b,\bolder\b,\b[<←≪]\b,'
r'\b(<<|«)\b'),
"A comma-separated list of regexes to use for 'prev' links."),
('find-implementation',
SettingValue(typ.String(
valid_values=typ.ValidValues(
('javascript', "Better but slower"),
('python', "Slightly worse but faster"),
)), 'python'),
"Which implementation to use to find elements to hint."),
('hide-unmatched-rapid-hints',
SettingValue(typ.Bool(), 'true'),
"Controls hiding unmatched hints in rapid mode."),
readonly=readonly
)),
('searchengines', sect.ValueList(
typ.SearchEngineName(), typ.SearchEngineUrl(),
('DEFAULT', 'https://duckduckgo.com/?q={}'),
readonly=readonly
)),
('aliases', sect.ValueList(
typ.String(forbidden=' '), typ.Command(),
readonly=readonly
)),
('colors', sect.KeyValue(
('completion.fg',
SettingValue(typ.QtColor(), 'white'),
"Text color of the completion widget."),
('completion.bg',
SettingValue(typ.QssColor(), '#333333'),
"Background color of the completion widget."),
('completion.alternate-bg',
SettingValue(typ.QssColor(), '#444444'),
"Alternating background color of the completion widget."),
('completion.category.fg',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of completion widget category headers."),
('completion.category.bg',
SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, '
'y2:1, stop:0 #888888, stop:1 #505050)'),
"Background color of the completion widget category headers."),
('completion.category.border.top',
SettingValue(typ.QssColor(), 'black'),
"Top border color of the completion widget category headers."),
('completion.category.border.bottom',
SettingValue(typ.QssColor(), '${completion.category.border.top}'),
"Bottom border color of the completion widget category headers."),
('completion.item.selected.fg',
SettingValue(typ.QtColor(), 'black'),
"Foreground color of the selected completion item."),
('completion.item.selected.bg',
SettingValue(typ.QssColor(), '#e8c000'),
"Background color of the selected completion item."),
('completion.item.selected.border.top',
SettingValue(typ.QssColor(), '#bbbb00'),
"Top border color of the completion widget category headers."),
('completion.item.selected.border.bottom',
SettingValue(
typ.QssColor(), '${completion.item.selected.border.top}'),
"Bottom border color of the selected completion item."),
('completion.match.fg',
SettingValue(typ.QssColor(), '#ff4444'),
"Foreground color of the matched text in the completion."),
('completion.scrollbar.fg',
SettingValue(typ.QssColor(), '${completion.fg}'),
"Color of the scrollbar handle in completion view."),
('completion.scrollbar.bg',
SettingValue(typ.QssColor(), '${completion.bg}'),
"Color of the scrollbar in completion view"),
('statusbar.fg',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of the statusbar."),
('statusbar.bg',
SettingValue(typ.QssColor(), 'black'),
"Background color of the statusbar."),
('statusbar.fg.private',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in private browsing mode."),
('statusbar.bg.private',
SettingValue(typ.QssColor(), '#666666'),
"Background color of the statusbar in private browsing mode."),
('statusbar.fg.insert',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in insert mode."),
('statusbar.bg.insert',
SettingValue(typ.QssColor(), 'darkgreen'),
"Background color of the statusbar in insert mode."),
('statusbar.fg.command',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in command mode."),
('statusbar.bg.command',
SettingValue(typ.QssColor(), '${statusbar.bg}'),
"Background color of the statusbar in command mode."),
('statusbar.fg.command.private',
SettingValue(typ.QssColor(), '${statusbar.fg.private}'),
"Foreground color of the statusbar in private browsing + command "
"mode."),
('statusbar.bg.command.private',
SettingValue(typ.QssColor(), '${statusbar.bg.private}'),
"Background color of the statusbar in private browsing + command "
"mode."),
('statusbar.fg.caret',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in caret mode."),
('statusbar.bg.caret',
SettingValue(typ.QssColor(), 'purple'),
"Background color of the statusbar in caret mode."),
('statusbar.fg.caret-selection',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Foreground color of the statusbar in caret mode with a "
"selection"),
('statusbar.bg.caret-selection',
SettingValue(typ.QssColor(), '#a12dff'),
"Background color of the statusbar in caret mode with a "
"selection"),
('statusbar.progress.bg',
SettingValue(typ.QssColor(), 'white'),
"Background color of the progress bar."),
('statusbar.url.fg',
SettingValue(typ.QssColor(), '${statusbar.fg}'),
"Default foreground color of the URL in the statusbar."),
('statusbar.url.fg.success',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of the URL in the statusbar on successful "
"load (http)."),
('statusbar.url.fg.success.https',
SettingValue(typ.QssColor(), 'lime'),
"Foreground color of the URL in the statusbar on successful "
"load (https)."),
('statusbar.url.fg.error',
SettingValue(typ.QssColor(), 'orange'),
"Foreground color of the URL in the statusbar on error."),
('statusbar.url.fg.warn',
SettingValue(typ.QssColor(), 'yellow'),
"Foreground color of the URL in the statusbar when there's a "
"warning."),
('statusbar.url.fg.hover',
SettingValue(typ.QssColor(), 'aqua'),
"Foreground color of the URL in the statusbar for hovered "
"links."),
('tabs.fg.odd',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of unselected odd tabs."),
('tabs.bg.odd',
SettingValue(typ.QtColor(), 'grey'),
"Background color of unselected odd tabs."),
('tabs.fg.even',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of unselected even tabs."),
('tabs.bg.even',
SettingValue(typ.QtColor(), 'darkgrey'),
"Background color of unselected even tabs."),
('tabs.fg.selected.odd',
SettingValue(typ.QtColor(), 'white'),
"Foreground color of selected odd tabs."),
('tabs.bg.selected.odd',
SettingValue(typ.QtColor(), 'black'),
"Background color of selected odd tabs."),
('tabs.fg.selected.even',
SettingValue(typ.QtColor(), '${tabs.fg.selected.odd}'),
"Foreground color of selected even tabs."),
('tabs.bg.selected.even',
SettingValue(typ.QtColor(), '${tabs.bg.selected.odd}'),
"Background color of selected even tabs."),
('tabs.bg.bar',
SettingValue(typ.QtColor(), '#555555'),
"Background color of the tab bar."),
('tabs.indicator.start',
SettingValue(typ.QtColor(), '#0000aa'),
"Color gradient start for the tab indicator."),
('tabs.indicator.stop',
SettingValue(typ.QtColor(), '#00aa00'),
"Color gradient end for the tab indicator."),
('tabs.indicator.error',
SettingValue(typ.QtColor(), '#ff0000'),
"Color for the tab indicator on errors.."),
('tabs.indicator.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for the tab indicator."),
('hints.fg',
SettingValue(typ.QssColor(), 'black'),
"Font color for hints."),
('hints.bg',
SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, '
'y2:1, stop:0 rgba(255, 247, 133, 0.8), '
'stop:1 rgba(255, 197, 66, 0.8))'),
"Background color for hints. Note that you can use a `rgba(...)` "
"value for transparency."),
('hints.fg.match',
SettingValue(typ.QssColor(), 'green'),
"Font color for the matched part of hints."),
('downloads.bg.bar',
SettingValue(typ.QssColor(), 'black'),
"Background color for the download bar."),
('downloads.fg.start',
SettingValue(typ.QtColor(), 'white'),
"Color gradient start for download text."),
('downloads.bg.start',
SettingValue(typ.QtColor(), '#0000aa'),
"Color gradient start for download backgrounds."),
('downloads.fg.stop',
SettingValue(typ.QtColor(), '${downloads.fg.start}'),
"Color gradient end for download text."),
('downloads.bg.stop',
SettingValue(typ.QtColor(), '#00aa00'),
"Color gradient stop for download backgrounds."),
('downloads.fg.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for download text."),
('downloads.bg.system',
SettingValue(typ.ColorSystem(), 'rgb'),
"Color gradient interpolation system for download backgrounds."),
('downloads.fg.error',
SettingValue(typ.QtColor(), 'white'),
"Foreground color for downloads with errors."),
('downloads.bg.error',
SettingValue(typ.QtColor(), 'red'),
"Background color for downloads with errors."),
('webpage.bg',
SettingValue(typ.QtColor(none_ok=True), 'white'),
"Background color for webpages if unset (or empty to use the "
"theme's color)"),
('keyhint.fg',
SettingValue(typ.QssColor(), '#FFFFFF'),
"Text color for the keyhint widget."),
('keyhint.fg.suffix',
SettingValue(typ.CssColor(), '#FFFF00'),
"Highlight color for keys to complete the current keychain"),
('keyhint.bg',
SettingValue(typ.QssColor(), 'rgba(0, 0, 0, 80%)'),
"Background color of the keyhint widget."),
('messages.fg.error',
SettingValue(typ.QssColor(), 'white'),
"Foreground color of an error message."),
('messages.bg.error',
SettingValue(typ.QssColor(), 'red'),
"Background color of an error message."),
('messages.border.error',
SettingValue(typ.QssColor(), '#bb0000'),
"Border color of an error message."),
('messages.fg.warning',
SettingValue(typ.QssColor(), 'white'),
"Foreground color a warning message."),
('messages.bg.warning',
SettingValue(typ.QssColor(), 'darkorange'),
"Background color of a warning message."),
('messages.border.warning',
SettingValue(typ.QssColor(), '#d47300'),
"Border color of an error message."),
('messages.fg.info',
SettingValue(typ.QssColor(), 'white'),
"Foreground color an info message."),
('messages.bg.info',
SettingValue(typ.QssColor(), 'black'),
"Background color of an info message."),
('messages.border.info',
SettingValue(typ.QssColor(), '#333333'),
"Border color of an info message."),
('prompts.fg',
SettingValue(typ.QssColor(), 'white'),
"Foreground color for prompts."),
('prompts.bg',
SettingValue(typ.QssColor(), 'darkblue'),
"Background color for prompts."),
('prompts.selected.bg',
SettingValue(typ.QssColor(), '#308cc6'),
"Background color for the selected item in filename prompts."),
readonly=readonly
)),
('fonts', sect.KeyValue(
('_monospace',
SettingValue(typ.Font(), 'xos4 Terminus, Terminus, Monospace, '
'"DejaVu Sans Mono", Monaco, '
'"Bitstream Vera Sans Mono", "Andale Mono", '
'"Courier New", Courier, "Liberation Mono", '
'monospace, Fixed, Consolas, Terminal'),
"Default monospace fonts."),
('completion',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the completion widget."),
('completion.category',
SettingValue(typ.Font(), 'bold ${completion}'),
"Font used in the completion categories."),
('tabbar',
SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the tab bar."),
('statusbar',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the statusbar."),
('downloads',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for the downloadbar."),
('hints',
SettingValue(typ.Font(), 'bold 13px ${_monospace}'),
"Font used for the hints."),
('debug-console',
SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for the debugging console."),
('web-family-standard',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for standard fonts."),
('web-family-fixed',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for fixed fonts."),
('web-family-serif',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for serif fonts."),
('web-family-sans-serif',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for sans-serif fonts."),
('web-family-cursive',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for cursive fonts."),
('web-family-fantasy',
SettingValue(typ.FontFamily(none_ok=True), ''),
"Font family for fantasy fonts."),
# Defaults for web-size-* from WebEngineSettings::initDefaults in
# qtwebengine/src/core/web_engine_settings.cpp and
# QWebSettings::QWebSettings() in
# qtwebkit/Source/WebKit/qt/Api/qwebsettings.cpp
('web-size-minimum',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '0'),
"The hard minimum font size."),
# This is 0 as default on QtWebKit, and 6 on QtWebEngine - so let's
# just go for 6 here.
('web-size-minimum-logical',
SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '6'),
"The minimum logical font size that is applied when zooming "
"out."),
('web-size-default',
SettingValue(typ.Int(minval=1, maxval=MAXVALS['int']), '16'),
"The default font size for regular text."),
('web-size-default-fixed',
SettingValue(typ.Int(minval=1, maxval=MAXVALS['int']), '13'),
"The default font size for fixed-pitch text."),
('keyhint',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used in the keyhint widget."),
('messages.error',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for error messages."),
('messages.warning',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for warning messages."),
('messages.info',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'),
"Font used for info messages."),
('prompts',
SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' sans-serif'),
"Font used for prompts."),
readonly=readonly
)),
])
DATA = data(readonly=True)
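# Rough usage sketch (an assumption, not code from this module): DATA maps
# section names to section objects, so a default could be looked up roughly
# like this, assuming sect.KeyValue supports dict-style access and
# SettingValue exposes a value() accessor:
#
#   ignore_case = DATA['general']['ignore-case']   # a SettingValue
#   ignore_case.value()                            # -> 'smart'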
KEY_FIRST_COMMENT = """
# vim: ft=conf
#
# In this config file, qutebrowser's key bindings are configured.
# The format looks like this:
#
# [keymode]
#
# command
# keychain
# keychain2
# ...
#
# All blank lines and lines starting with '#' are ignored.
# Inline-comments are not permitted.
#
# keymode is a comma separated list of modes in which the key binding should be
# active. If keymode starts with !, the key binding is active in all modes
# except the listed modes.
#
# For special keys (can't be part of a keychain), enclose them in `<`...`>`.
# For modifiers, you can use either `-` or `+` as delimiters, and these names:
#
# * Control: `Control`, `Ctrl`
# * Meta: `Meta`, `Windows`, `Mod4`
# * Alt: `Alt`, `Mod1`
# * Shift: `Shift`
#
# For simple keys (no `<>`-signs), a capital letter means the key is pressed
# with Shift. For special keys (with `<>`-signs), you need to explicitly add
# `Shift-` to match a key pressed with shift.
#
# Note that default keybindings are always bound, and need to be explicitly
# unbound if you wish to remove them:
#
# <unbound>
# keychain
# keychain2
# ...
"""
KEY_SECTION_DESC = {
'all': "Keybindings active in all modes.",
'normal': "Keybindings for normal mode.",
'insert': (
"Keybindings for insert mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `open-editor`: Open a texteditor with the focused field.\n"
" * `paste-primary`: Paste primary selection at cursor position."),
'hint': (
"Keybindings for hint mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `follow-hint`: Follow the currently selected hint."),
'passthrough': (
"Keybindings for passthrough mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode."),
'command': (
"Keybindings for command mode.\n"
"Since normal keypresses are passed through, only special keys are "
"supported in this mode.\n"
"Useful hidden commands to map in this section:\n\n"
" * `command-history-prev`: Switch to previous command in history.\n"
" * `command-history-next`: Switch to next command in history.\n"
" * `completion-item-focus`: Select another item in completion.\n"
" * `command-accept`: Execute the command currently in the "
"commandline."),
'prompt': (
"Keybindings for prompts in the status line.\n"
"You can bind normal keys in this mode, but they will be only active "
"when a yes/no-prompt is asked. For other prompt modes, you can only "
"bind special keys.\n"
"Useful hidden commands to map in this section:\n\n"
" * `prompt-accept`: Confirm the entered value.\n"
" * `prompt-accept yes`: Answer yes to a yes/no question.\n"
" * `prompt-accept no`: Answer no to a yes/no question."),
'caret': (
""),
}
# Keys which are similar to Return and should be bound by default where Return
# is bound.
RETURN_KEYS = ['<Return>', '<Ctrl-M>', '<Ctrl-J>', '<Shift-Return>', '<Enter>',
'<Shift-Enter>']
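# For example, KEY_DATA below binds 'follow-selected' (normal mode),
# 'follow-hint' (hint mode), 'command-accept' (command mode) and
# 'prompt-accept' (prompt mode) to all of RETURN_KEYS.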
KEY_DATA = collections.OrderedDict([
('!normal', collections.OrderedDict([
('leave-mode', ['<Escape>', '<Ctrl-[>']),
])),
('normal', collections.OrderedDict([
('clear-keychain ;; search ;; fullscreen --leave',
['<Escape>', '<Ctrl-[>']),
('set-cmd-text -s :open', ['o']),
('set-cmd-text :open {url:pretty}', ['go']),
('set-cmd-text -s :open -t', ['O']),
('set-cmd-text :open -t -i {url:pretty}', ['gO']),
('set-cmd-text -s :open -b', ['xo']),
('set-cmd-text :open -b -i {url:pretty}', ['xO']),
('set-cmd-text -s :open -w', ['wo']),
('set-cmd-text :open -w {url:pretty}', ['wO']),
('set-cmd-text /', ['/']),
('set-cmd-text ?', ['?']),
('set-cmd-text :', [':']),
('open -t', ['ga', '<Ctrl-T>']),
('open -w', ['<Ctrl-N>']),
('tab-close', ['d', '<Ctrl-W>']),
('tab-close -o', ['D']),
('tab-only', ['co']),
('tab-focus', ['T']),
('tab-move', ['gm']),
('tab-move -', ['gl']),
('tab-move +', ['gr']),
('tab-next', ['J', '<Ctrl-PgDown>']),
('tab-prev', ['K', '<Ctrl-PgUp>']),
('tab-clone', ['gC']),
('reload', ['r', '<F5>']),
('reload -f', ['R', '<Ctrl-F5>']),
('back', ['H', '<back>']),
('back -t', ['th']),
('back -w', ['wh']),
('forward', ['L', '<forward>']),
('forward -t', ['tl']),
('forward -w', ['wl']),
('fullscreen', ['<F11>']),
('hint', ['f']),
('hint all tab', ['F']),
('hint all window', ['wf']),
('hint all tab-bg', [';b']),
('hint all tab-fg', [';f']),
('hint all hover', [';h']),
('hint images', [';i']),
('hint images tab', [';I']),
('hint links fill :open {hint-url}', [';o']),
('hint links fill :open -t -i {hint-url}', [';O']),
('hint links yank', [';y']),
('hint links yank-primary', [';Y']),
('hint --rapid links tab-bg', [';r']),
('hint --rapid links window', [';R']),
('hint links download', [';d']),
('hint inputs', [';t']),
('scroll left', ['h']),
('scroll down', ['j']),
('scroll up', ['k']),
('scroll right', ['l']),
('undo', ['u', '<Ctrl-Shift-T>']),
('scroll-perc 0', ['gg']),
('scroll-perc', ['G']),
('search-next', ['n']),
('search-prev', ['N']),
('enter-mode insert', ['i']),
('enter-mode caret', ['v']),
('enter-mode set_mark', ['`']),
('enter-mode jump_mark', ["'"]),
('yank', ['yy']),
('yank -s', ['yY']),
('yank title', ['yt']),
('yank title -s', ['yT']),
('yank domain', ['yd']),
('yank domain -s', ['yD']),
('yank pretty-url', ['yp']),
('yank pretty-url -s', ['yP']),
('open -- {clipboard}', ['pp']),
('open -- {primary}', ['pP']),
('open -t -- {clipboard}', ['Pp']),
('open -t -- {primary}', ['PP']),
('open -w -- {clipboard}', ['wp']),
('open -w -- {primary}', ['wP']),
('quickmark-save', ['m']),
('set-cmd-text -s :quickmark-load', ['b']),
('set-cmd-text -s :quickmark-load -t', ['B']),
('set-cmd-text -s :quickmark-load -w', ['wb']),
('bookmark-add', ['M']),
('set-cmd-text -s :bookmark-load', ['gb']),
('set-cmd-text -s :bookmark-load -t', ['gB']),
('set-cmd-text -s :bookmark-load -w', ['wB']),
('save', ['sf']),
('set-cmd-text -s :set', ['ss']),
('set-cmd-text -s :set -t', ['sl']),
('set-cmd-text -s :bind', ['sk']),
('zoom-out', ['-']),
('zoom-in', ['+']),
('zoom', ['=']),
('navigate prev', ['[[']),
('navigate next', [']]']),
('navigate prev -t', ['{{']),
('navigate next -t', ['}}']),
('navigate up', ['gu']),
('navigate up -t', ['gU']),
('navigate increment', ['<Ctrl-A>']),
('navigate decrement', ['<Ctrl-X>']),
('inspector', ['wi']),
('download', ['gd']),
('download-cancel', ['ad']),
('download-clear', ['cd']),
('view-source', ['gf']),
('set-cmd-text -s :buffer', ['gt']),
('tab-focus last', ['<Ctrl-Tab>', '<Ctrl-6>', '<Ctrl-^>']),
('enter-mode passthrough', ['<Ctrl-V>']),
('quit', ['<Ctrl-Q>', 'ZQ']),
('wq', ['ZZ']),
('scroll-page 0 1', ['<Ctrl-F>']),
('scroll-page 0 -1', ['<Ctrl-B>']),
('scroll-page 0 0.5', ['<Ctrl-D>']),
('scroll-page 0 -0.5', ['<Ctrl-U>']),
('tab-focus 1', ['<Alt-1>', 'g0', 'g^']),
('tab-focus 2', ['<Alt-2>']),
('tab-focus 3', ['<Alt-3>']),
('tab-focus 4', ['<Alt-4>']),
('tab-focus 5', ['<Alt-5>']),
('tab-focus 6', ['<Alt-6>']),
('tab-focus 7', ['<Alt-7>']),
('tab-focus 8', ['<Alt-8>']),
('tab-focus -1', ['<Alt-9>', 'g$']),
('home', ['<Ctrl-h>']),
('stop', ['<Ctrl-s>']),
('print', ['<Ctrl-Alt-p>']),
('open qute://settings', ['Ss']),
('follow-selected', RETURN_KEYS),
('follow-selected -t', ['<Ctrl-Return>', '<Ctrl-Enter>']),
('repeat-command', ['.']),
('tab-pin', ['<Ctrl-p>']),
('record-macro', ['q']),
('run-macro', ['@']),
])),
('insert', collections.OrderedDict([
('open-editor', ['<Ctrl-E>']),
('insert-text {primary}', ['<Shift-Ins>']),
])),
('hint', collections.OrderedDict([
('follow-hint', RETURN_KEYS),
('hint --rapid links tab-bg', ['<Ctrl-R>']),
('hint links', ['<Ctrl-F>']),
('hint all tab-bg', ['<Ctrl-B>']),
])),
('passthrough', {}),
('command', collections.OrderedDict([
('command-history-prev', ['<Ctrl-P>']),
('command-history-next', ['<Ctrl-N>']),
('completion-item-focus prev', ['<Shift-Tab>', '<Up>']),
('completion-item-focus next', ['<Tab>', '<Down>']),
('completion-item-focus next-category', ['<Ctrl-Tab>']),
('completion-item-focus prev-category', ['<Ctrl-Shift-Tab>']),
('completion-item-del', ['<Ctrl-D>']),
('command-accept', RETURN_KEYS),
])),
('prompt', collections.OrderedDict([
('prompt-accept', RETURN_KEYS),
('prompt-accept yes', ['y']),
('prompt-accept no', ['n']),
('prompt-open-download', ['<Ctrl-X>']),
('prompt-item-focus prev', ['<Shift-Tab>', '<Up>']),
('prompt-item-focus next', ['<Tab>', '<Down>']),
])),
('command,prompt', collections.OrderedDict([
('rl-backward-char', ['<Ctrl-B>']),
('rl-forward-char', ['<Ctrl-F>']),
('rl-backward-word', ['<Alt-B>']),
('rl-forward-word', ['<Alt-F>']),
('rl-beginning-of-line', ['<Ctrl-A>']),
('rl-end-of-line', ['<Ctrl-E>']),
('rl-unix-line-discard', ['<Ctrl-U>']),
('rl-kill-line', ['<Ctrl-K>']),
('rl-kill-word', ['<Alt-D>']),
('rl-unix-word-rubout', ['<Ctrl-W>']),
('rl-backward-kill-word', ['<Alt-Backspace>']),
('rl-yank', ['<Ctrl-Y>']),
('rl-delete-char', ['<Ctrl-?>']),
('rl-backward-delete-char', ['<Ctrl-H>']),
])),
('caret', collections.OrderedDict([
('toggle-selection', ['v', '<Space>']),
('drop-selection', ['<Ctrl-Space>']),
('enter-mode normal', ['c']),
('move-to-next-line', ['j']),
('move-to-prev-line', ['k']),
('move-to-next-char', ['l']),
('move-to-prev-char', ['h']),
('move-to-end-of-word', ['e']),
('move-to-next-word', ['w']),
('move-to-prev-word', ['b']),
('move-to-start-of-next-block', [']']),
('move-to-start-of-prev-block', ['[']),
('move-to-end-of-next-block', ['}']),
('move-to-end-of-prev-block', ['{']),
('move-to-start-of-line', ['0']),
('move-to-end-of-line', ['$']),
('move-to-start-of-document', ['gg']),
('move-to-end-of-document', ['G']),
('yank selection -s', ['Y']),
('yank selection', ['y'] + RETURN_KEYS),
('scroll left', ['H']),
('scroll down', ['J']),
('scroll up', ['K']),
('scroll right', ['L']),
])),
])
# A list of (regex, replacement) tuples of changed key commands.
CHANGED_KEY_COMMANDS = [
(re.compile(r'^open -([twb]) about:blank$'), r'open -\1'),
(re.compile(r'^download-page$'), r'download'),
(re.compile(r'^cancel-download$'), r'download-cancel'),
(re.compile(r"""^search (''|"")$"""),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r'^search$'),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r'^clear-keychain ;; search$'),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r"""^set-cmd-text ['"](.*) ['"]$"""), r'set-cmd-text -s \1'),
(re.compile(r"""^set-cmd-text ['"](.*)['"]$"""), r'set-cmd-text \1'),
(re.compile(r"^hint links rapid$"), r'hint --rapid links tab-bg'),
(re.compile(r"^hint links rapid-win$"), r'hint --rapid links window'),
(re.compile(r'^scroll -50 0$'), r'scroll left'),
(re.compile(r'^scroll 0 50$'), r'scroll down'),
(re.compile(r'^scroll 0 -50$'), r'scroll up'),
(re.compile(r'^scroll 50 0$'), r'scroll right'),
(re.compile(r'^scroll ([-\d]+ [-\d]+)$'), r'scroll-px \1'),
(re.compile(r'^search *;; *clear-keychain$'),
r'clear-keychain ;; search ;; fullscreen --leave'),
(re.compile(r'^clear-keychain *;; *leave-mode$'), r'leave-mode'),
(re.compile(r'^download-remove --all$'), r'download-clear'),
(re.compile(r'^hint links fill "([^"]*)"$'), r'hint links fill \1'),
(re.compile(r'^yank -t(\S+)'), r'yank title -\1'),
(re.compile(r'^yank -t'), r'yank title'),
(re.compile(r'^yank -d(\S+)'), r'yank domain -\1'),
(re.compile(r'^yank -d'), r'yank domain'),
(re.compile(r'^yank -p(\S+)'), r'yank pretty-url -\1'),
(re.compile(r'^yank -p'), r'yank pretty-url'),
(re.compile(r'^yank-selected -p'), r'yank selection -s'),
(re.compile(r'^yank-selected'), r'yank selection'),
(re.compile(r'^paste$'), r'open -- {clipboard}'),
(re.compile(r'^paste -s$'), r'open -- {primary}'),
(re.compile(r'^paste -([twb])$'), r'open -\1 -- {clipboard}'),
(re.compile(r'^paste -([twb])s$'), r'open -\1 -- {primary}'),
(re.compile(r'^paste -s([twb])$'), r'open -\1 -- {primary}'),
(re.compile(r'^completion-item-next'), r'completion-item-focus next'),
(re.compile(r'^completion-item-prev'), r'completion-item-focus prev'),
(re.compile(r'^open {clipboard}$'), r'open -- {clipboard}'),
(re.compile(r'^open -([twb]) {clipboard}$'), r'open -\1 -- {clipboard}'),
(re.compile(r'^open {primary}$'), r'open -- {primary}'),
(re.compile(r'^open -([twb]) {primary}$'), r'open -\1 -- {primary}'),
(re.compile(r'^paste-primary$'), r'insert-text {primary}'),
(re.compile(r'^set-cmd-text -s :search$'), r'set-cmd-text /'),
(re.compile(r'^set-cmd-text -s :search -r$'), r'set-cmd-text ?'),
(re.compile(r'^set-cmd-text -s :$'), r'set-cmd-text :'),
(re.compile(r'^set-cmd-text -s :set keybind$'), r'set-cmd-text -s :bind'),
(re.compile(r'^prompt-yes$'), r'prompt-accept yes'),
(re.compile(r'^prompt-no$'), r'prompt-accept no'),
(re.compile(r'^tab-close -l$'), r'tab-close --prev'),
(re.compile(r'^tab-close --left$'), r'tab-close --prev'),
(re.compile(r'^tab-close -r$'), r'tab-close --next'),
(re.compile(r'^tab-close --right$'), r'tab-close --next'),
(re.compile(r'^tab-only -l$'), r'tab-only --prev'),
(re.compile(r'^tab-only --left$'), r'tab-only --prev'),
(re.compile(r'^tab-only -r$'), r'tab-only --next'),
(re.compile(r'^tab-only --right$'), r'tab-only --next'),
]
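# The tuples above are matched against old key commands one by one; the first
# regex that matches rewrites the command. A minimal sketch (not the actual
# migration code used by qutebrowser) of how they could be applied:
#
#     def migrate_command(old):
#         for regex, replacement in CHANGED_KEY_COMMANDS:
#             if regex.match(old):
#                 return regex.sub(replacement, old)
#         return old
#
#     migrate_command('paste -t')    # -> 'open -t -- {clipboard}'
#     migrate_command('prompt-yes')  # -> 'prompt-accept yes'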
| gpl-3.0 | 5,243,266,520,370,665,000 | 40.059427 | 79 | 0.52219 | false |
chandler14362/panda3d | direct/src/p3d/packp3d.py | 10 | 8913 | #! /usr/bin/env python
usageText = """
This command will pack a Panda application, consisting of a directory
tree of .py files and models, into a p3d file for convenient
distribution. The resulting p3d file can be run by the Panda3D
runtime executable, or by the Panda3D web browser plugin.
This command will build p3d files that reference Panda3D %s,
from host %s .
Also see ppackage, a more powerful (but more complex) tool that can
also be used to build p3d applications, using a pdef description file.
Usage:
%s [opts] -o app.p3d
Options:
-o app.p3d
Specify the name of the p3d file to generate. This is required.
-d application_root
Specify the root directory of the application source; this is a
directory tree that contains all of your .py files and models.
If this is omitted, the default is the current directory.
-m main.py
Names the Python file that begins the application. This should
be a file within the root directory. If this is omitted, the
default is a file named "main.py", or if there is only one Python
file present, it is used. If this file contains a function
called main(), that function will be called after importing it
(this is preferable to having the module start itself immediately
upon importing).
-S file.crt[,chain.crt[,file.key[,\"password\"]]]
Signs the resulting p3d with the indicated certificate. You may
specify the signing certificate, the optional authorization
chain, and the private key in three different files, or they may
all be combined in the first file. If the private key is
encrypted, the password will be required to decrypt it.
-e ext
Adds a new extension to be processed as a generic, compressible
file type. Do not include the leading dot. Files matching this
extension found within the root directory will be automatically
added to the p3d file, in compressed form. This option may be
repeated as necessary.
-n ext
Adds a new extension to be processed as a noncompressible file
type. Files matching this extension will be added to the p3d
file, in their original, uncompressed form. You should use this
instead of -e for files that are uncompressible by their nature
(e.g. mpg files). This option may be repeated as necessary.
-x ext
Marks files with the given extensions of needing to be physically
extracted to disk before they can be loaded. This is used for
file types that cannot be loaded via the virtual file system,
such as .ico files on Windows.
This option is currently only implemented when deploying the
application with pdeploy.
-p python_lib_dir
Adds a directory to search for additional Python modules. You
can use this to add your system's Python path, to allow packp3d
to find any system modules not included in the standard Panda3D
release, but your version of Python must match this one (%s).
This option may be repeated to add multiple directories.
-c config=value
Sets the indicated config flag in the application. This option
may be repeated as necessary.
-r package[,version[,hostURL]]
Names an additional package that this application requires at
startup time. The default package is 'panda3d'; you may repeat
this option to indicate dependencies on additional packages.
-s search_dir
Additional directories to search for previously-built packages.
This option may be repeated as necessary. These directories may
also be specified with the pdef-path Config.prc variable.
-D
Sets the allow_python_dev flag in the application. This enables
additional runtime debug operations, particularly the -i option
to the panda3d command, which enables a live Python prompt within
the application's environment. Setting this flag may be useful
to develop an application initially, but should not be set on an
application intended for deployment.
"""
import sys
import os
import getopt
import glob
from direct.p3d import Packager
from panda3d.core import *
# Temp hack for debugging.
#from direct.p3d.AppRunner import dummyAppRunner; dummyAppRunner()
class ArgumentError(Exception):
pass
def makePackedApp(args):
opts, args = getopt.getopt(args, 'o:d:m:S:e:n:x:p:c:r:s:Dh')
packager = Packager.Packager()
appFilename = None
root = Filename('.')
main = None
configFlags = []
requires = []
allowPythonDev = False
for option, value in opts:
if option == '-o':
appFilename = Filename.fromOsSpecific(value)
elif option == '-d':
root = Filename.fromOsSpecific(value)
elif option == '-m':
main = value
elif option == '-S':
tokens = value.split(',')
while len(tokens) < 4:
tokens.append('')
certificate, chain, pkey, password = tokens[:4]
packager.signParams.append((Filename.fromOsSpecific(certificate),
Filename.fromOsSpecific(chain),
Filename.fromOsSpecific(pkey),
Filename.fromOsSpecific(password)))
elif option == '-e':
packager.binaryExtensions.append(value)
elif option == '-n':
packager.uncompressibleExtensions.append(value)
elif option == '-x':
packager.extractExtensions.append(value)
elif option == '-p':
sys.path.append(value)
elif option == '-c':
configFlags.append(value.split('=', 1))
elif option == '-r':
tokens = value.split(',')
while len(tokens) < 3:
tokens.append('')
name, version, host = tokens[:3]
requires.append((name, version, host))
elif option == '-s':
packager.installSearch.append(Filename.fromOsSpecific(value))
elif option == '-D':
allowPythonDev = True
elif option == '-h':
print(usageText % (
PandaSystem.getPackageVersionString(),
PandaSystem.getPackageHostUrl(),
os.path.split(sys.argv[0])[1],
'%s.%s' % (sys.version_info[0], sys.version_info[1])))
sys.exit(0)
if not appFilename:
raise ArgumentError("No target app specified. Use:\n %s -o app.p3d\nUse -h to get more usage information." % (os.path.split(sys.argv[0])[1]))
if args:
raise ArgumentError("Extra arguments on command line.")
if appFilename.getExtension() != 'p3d':
raise ArgumentError('Application filename must end in ".p3d".')
appDir = Filename(appFilename.getDirname())
if not appDir:
appDir = Filename('.')
appBase = appFilename.getBasenameWoExtension()
if main:
main = Filename.fromOsSpecific(main)
main.makeAbsolute(root)
else:
main = Filename(root, 'main.py')
if not main.exists():
main = glob.glob(os.path.join(root.toOsSpecific(), '*.py'))
if len(main) == 0:
raise ArgumentError('No Python files in root directory.')
elif len(main) > 1:
raise ArgumentError('Multiple Python files in root directory; specify the main application with -m "main".')
main = Filename.fromOsSpecific(os.path.split(main[0])[1])
main.makeAbsolute(root)
packager.installDir = appDir
packager.allowPythonDev = allowPythonDev
# Put the root directory on the front of the model-path, so that
# any texture references in egg or bam files that reference
# textures from the top of the root directory will be properly
# resolved.
getModelPath().prependDirectory(root)
try:
packager.setup()
packager.beginPackage(appBase, p3dApplication = True)
# Pre-require panda3d, to give a less-confusing error message
# if one of our requirements pulls in a wrong version of
# panda3d.
if 'panda3d' not in [t[0] for t in requires]:
packager.do_require('panda3d')
for name, version, host in requires:
packager.do_require(name, version = version, host = host)
if configFlags:
packager.do_config(**dict(configFlags))
packager.do_dir(root)
packager.do_main(main)
packager.endPackage()
packager.close()
except Packager.PackagerError:
# Just print the error message and exit gracefully.
inst = sys.exc_info()[1]
print(inst.args[0])
sys.exit(1)
try:
makePackedApp(sys.argv[1:])
except ArgumentError as e:
print(e.args[0])
sys.exit(1)
# An explicit call to exit() is required to exit the program, when
# this module is packaged in a p3d file.
sys.exit(0)
| bsd-3-clause | 1,656,250,894,107,034,400 | 35.983402 | 151 | 0.653203 | false |
ocefpaf/wicken | wicken/exceptions.py | 2 | 1507 | #!/usr/bin/env python
'''
COPYRIGHT 2013 RPS ASA
This file is part of Wicken.
Wicken is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Wicken is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Wicken. If not, see <http://www.gnu.org/licenses/>.
@author David Stuebe <[email protected]>
@file exceptions.py
@date 06/05/13
@description exception classes for the wicken project
'''
from __future__ import absolute_import, print_function, division
class WickenException(Exception):
"""
Base class for all exceptions in Wicken
"""
pass
class DogmaGetterSetterException(WickenException):
"""
Exception class for errors during get or set of a dogmatic belief (a property)
"""
pass
class DogmaDeleteException(WickenException):
"""
Exception class for errors while deleting of a dogmatic belief (a property)
"""
pass
class DogmaMetaClassException(WickenException):
"""
Exception class for errors while creating the dogma class for a particular set of beliefs
"""
pass
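# Illustrative usage (the ``dogma`` object and attribute below are placeholders,
# not part of this module): callers can catch the WickenException base class to
# handle any of the more specific errors raised while getting, setting or
# deleting a dogmatic belief.
#
#     try:
#         dogma.title = "Updated title"
#     except WickenException as err:
#         print("Wicken error: %s" % err)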
| apache-2.0 | -1,584,580,250,176,985,300 | 27.433962 | 93 | 0.726609 | false |
ElementsProject/elements | test/functional/feature_block.py | 1 | 67342 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
get_legacy_sigopcount_block,
MAX_BLOCK_SIGOPS,
)
from test_framework.key import ECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_str,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_DROP,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
LegacySignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from data import invalid_txs
# Use this class for tests that require behavior other than normal p2p behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
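        # Write the tx count as a deliberately non-canonical compact size: a 0xff
        # marker followed by the count as an 8-byte little-endian integer, adding
        # 8 extra bytes compared to the canonical 1-byte encoding (see the b64a
        # test below).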
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
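# The scriptSig below pushes the single byte 0x78 (decimal 120), i.e. the
# height commitment (BIP34-style) for a block at height 120, which lets a later
# coinbase serialize identically to the height-120 one for the BIP30
# duplicate-txid tests in run_test().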
DUPLICATE_COINBASE_SCRIPT_SIG = b'\x01\x78' # Valid for block at height 120
class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-acceptnonstdtxn=1']] # This is a consensus block test, we don't care about tx policy
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
self.block_heights = {}
self.coinbase_key = ECKey()
self.coinbase_key.generate()
self.coinbase_pubkey = self.coinbase_key.get_pubkey().get_bytes()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
b_dup_cb = self.next_block('dup_cb')
b_dup_cb.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG
b_dup_cb.vtx[0].rehash()
duplicate_tx = b_dup_cb.vtx[0]
b_dup_cb = self.update_block('dup_cb', [])
self.send_blocks([b_dup_cb])
b0 = self.next_block(0)
self.save_spendable_output()
self.send_blocks([b0])
# These constants chosen specifically to trigger an immature coinbase spend
# at a certain time below.
NUM_BUFFER_BLOCKS_TO_GENERATE = 99
NUM_OUTPUTS_TO_COLLECT = 33
# Allow the block to mature
blocks = []
for i in range(NUM_BUFFER_BLOCKS_TO_GENERATE):
blocks.append(self.next_block("maturitybuffer.{}".format(i)))
self.save_spendable_output()
self.send_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for _ in range(NUM_OUTPUTS_TO_COLLECT):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.send_blocks([b1, b2], timeout=4)
# Select a txn with an output eligible for spending. This won't actually be spent,
# since we're testing submission of a series of blocks with invalid txns.
attempt_spend_tx = out[2]
# Submit blocks for rejection, each of which contains a single transaction
# (aside from coinbase) which should be considered invalid.
for TxTemplate in invalid_txs.iter_all_templates():
template = TxTemplate(spend_tx=attempt_spend_tx)
if template.valid_in_block:
continue
self.log.info("Reject block with invalid tx: %s", TxTemplate.__name__)
blockname = "for_invalid.%s" % TxTemplate.__name__
badblock = self.next_block(blockname)
badtx = template.get_tx()
if TxTemplate != invalid_txs.InputMissing:
self.sign_tx(badtx, attempt_spend_tx)
badtx.rehash()
badblock = self.update_block(blockname, [badtx])
self.send_blocks(
[badblock], success=False,
reject_reason=(template.block_reject_reason or template.reject_reason),
reconnect=True, timeout=2)
self.move_tip(2)
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = b3.vtx[1]
self.send_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.send_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.send_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.send_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.send_blocks([b7], False)
b8 = self.next_block(8, spend=out[4])
self.send_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.send_blocks([b9], success=False, reject_reason='bad-cb-amount', reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.send_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.send_blocks([b11], success=False, reject_reason='bad-cb-amount', reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.send_blocks([b12, b13, b14], success=False, reject_reason='bad-cb-amount', reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.send_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.send_blocks([b16], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx")
self.move_tip(15)
b17 = self.next_block(17, spend=txout_b3)
self.send_blocks([b17], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
self.move_tip(13)
b18 = self.next_block(18, spend=txout_b3)
self.send_blocks([b18], False)
b19 = self.next_block(19, spend=out[6])
self.send_blocks([b19], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase.")
self.move_tip(15)
b20 = self.next_block(20, spend=out[7])
self.send_blocks([b20], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True)
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
self.move_tip(13)
b21 = self.next_block(21, spend=out[6])
self.send_blocks([b21], False)
b22 = self.next_block(22, spend=out[5])
self.send_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True)
        # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
#script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 149
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
tx.vout.append(CTxOut(b23.vtx[1].vout[0].nValue.getAmount() - 0)) # fee
b23 = self.update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
self.send_blocks([b23], True)
self.save_spendable_output()
self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
#script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 149
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
tx.vout.append(CTxOut(b23.vtx[1].vout[0].nValue.getAmount() - 0)) # fee
b24 = self.update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
self.send_blocks([b24], success=False, reject_reason='bad-blk-length', reconnect=True)
b25 = self.next_block(25, spend=out[7])
self.send_blocks([b25], False)
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with coinbase input script size out of range")
self.move_tip(15)
b26 = self.next_block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = self.update_block(26, [])
self.send_blocks([b26], success=False, reject_reason='bad-cb-length', reconnect=True)
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = self.next_block(27, spend=out[7])
self.send_blocks([b27], False)
# Now try a too-large-coinbase script
self.move_tip(15)
b28 = self.next_block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = self.update_block(28, [])
self.send_blocks([b28], success=False, reject_reason='bad-cb-length', reconnect=True)
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = self.next_block(29, spend=out[7])
self.send_blocks([b29], False)
# b30 has a max-sized coinbase scriptSig.
self.move_tip(23)
b30 = self.next_block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = self.update_block(30, [])
self.send_blocks([b30], True)
self.save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
self.send_blocks([b31], True)
self.save_spendable_output()
# this goes over the limit because the coinbase has one sigop
self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
self.send_blocks([b32], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# CHECKMULTISIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
self.move_tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
self.send_blocks([b33], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
self.send_blocks([b34], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# CHECKSIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
self.move_tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
self.send_blocks([b35], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
self.send_blocks([b36], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
self.log.info("Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
b37 = self.next_block(37, spend=out[11])
txout_b37 = b37.vtx[1]
tx = self.create_and_sign_transaction(out[11], 0)
b37 = self.update_block(37, [tx])
self.send_blocks([b37], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.send_blocks([b38], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
        # b39 - create some P2SH outputs that will require b39_sigops_per_output (11) sigops to spend:
        #
        #       redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 10, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
b39 = self.next_block(39)
b39_outputs = 0
# ELEMENTS: increase the number of sigops per output to not hit max block size first
b39_sigops_per_output = 11
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * (b39_sigops_per_output-1) + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = self.create_tx(spend, 0, 1, p2sh_script)
tx.vout.append(CTxOut(spend.vout[0].nValue.getAmount() - 1, CScript([OP_TRUE])))
self.update_fee(tx, spend, 0)
self.sign_tx(tx, spend)
tx.rehash()
b39 = self.update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue.getAmount() - 1, CScript([OP_TRUE])))
self.update_fee(tx_new, tx_last, 1)
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
# The accounting in the loop above can be off, because it misses the
# compact size encoding of the number of transactions in the block.
# Make sure we didn't accidentally make too big a block. Note that the
# size of the block has non-determinism due to the ECDSA signature in
# the first transaction.
while (len(b39.serialize()) >= MAX_BLOCK_BASE_SIZE):
del b39.vtx[-1]
b39 = self.update_block(39, [])
self.send_blocks([b39], True)
self.save_spendable_output()
# Test sigops in P2SH redeem scripts
#
        # b40 creates as many txs spending the b39_sigops_per_output-sigop P2SH outputs from b39 as
        # fit under MAX_BLOCK_SIGOPS. The first tx has one sigop, and at the end we add just enough
        # extra sigops to put us one over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
self.log.info("Reject a block with too many P2SH sigops")
self.move_tip(39)
b40 = self.next_block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes + 1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
tx.vout.append(CTxOut(b39.vtx[i].vout[0].nValue.getAmount())) # fee
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = LegacySignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
self.update_block(40, new_txs)
self.send_blocks([b40], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# same as b40, but one less sigop
self.log.info("Accept a block with the max number of P2SH sigops")
self.move_tip(39)
b41 = self.next_block(41, spend=None)
self.update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
self.update_block(41, [tx])
self.send_blocks([b41], True)
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
self.move_tip(39)
b42 = self.next_block(42, spend=out[12])
self.save_spendable_output()
b43 = self.next_block(43, spend=out[13])
self.save_spendable_output()
self.send_blocks([b42, b43], True)
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.vtx.append(coinbase)
b44.block_height = height
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
self.send_blocks([b44], True)
self.log.info("Reject a block with a non-coinbase as the first tx")
non_coinbase = self.create_tx(out[15], 0, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.vtx.append(non_coinbase)
b45.block_height = height+1
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
self.tip = b45
self.blocks[45] = b45
self.send_blocks([b45], success=False, reject_reason='bad-cb-missing', reconnect=True)
self.log.info("Reject a block with no transactions")
self.move_tip(44)
b46 = CBlock()
b46.nTime = b44.nTime + 1
b46.hashPrevBlock = b44.sha256
b46.vtx = []
b46.block_height = height+1
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
self.send_blocks([b46], success=False, reject_reason='bad-blk-length', reconnect=True)
# No testing PoW
#self.log.info("Reject a block with invalid work")
#self.move_tip(44)
#b47 = self.next_block(47)
#target = uint256_from_compact(b47.nBits)
#while b47.sha256 <= target:
# # Rehash nonces until an invalid too-high-hash block is found.
# b47.nNonce += 1
# b47.rehash()
#self.send_blocks([b47], False, force_send=True, reject_reason='high-hash', reconnect=True)
self.log.info("Reject a block with a timestamp >2 hours in the future")
self.move_tip(44)
b48 = self.next_block(48)
b48.nTime = int(time.time()) + 60 * 60 * 3
# Header timestamp has changed. Re-solve the block.
b48.solve()
self.send_blocks([b48], False, force_send=True, reject_reason='time-too-new')
self.log.info("Reject a block with invalid merkle hash")
self.move_tip(44)
b49 = self.next_block(49)
b49.hashMerkleRoot += 1
b49.solve()
self.send_blocks([b49], success=False, reject_reason='bad-txnmrklroot', reconnect=True)
# No testing PoW
#self.log.info("Reject a block with incorrect POW limit")
#self.move_tip(44)
#b50 = self.next_block(50)
#b50.nBits = b50.nBits - 1
#b50.solve()
#self.send_blocks([b50], False, force_send=True, reject_reason='bad-diffbits', reconnect=True)
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
b51 = self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.send_blocks([b51], success=False, reject_reason='bad-cb-multiple', reconnect=True)
self.log.info("Reject a block with duplicate transactions")
# Note: txns have to be in the right position in the merkle tree to trigger this error
self.move_tip(44)
b52 = self.next_block(52, spend=out[15])
tx = self.create_tx(b52.vtx[1], 0, 1)
b52 = self.update_block(52, [tx, tx])
self.send_blocks([b52], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
self.move_tip(43)
b53 = self.next_block(53, spend=out[14])
self.send_blocks([b53], False)
self.save_spendable_output()
self.log.info("Reject a block with timestamp before MedianTimePast")
b54 = self.next_block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
self.send_blocks([b54], False, force_send=True, reject_reason='time-too-old', reconnect=True)
# valid timestamp
self.move_tip(53)
b55 = self.next_block(55, spend=out[15])
b55.nTime = b35.nTime
self.update_block(55, [])
self.send_blocks([b55], True)
self.save_spendable_output()
# Test Merkle tree malleability
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
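        # The root is unchanged because the merkle computation duplicates the last
        # hash of any level with an odd number of entries, so e.g. [cb, tx, tx1]
        # and [cb, tx, tx1, tx1] produce the same root even though the latter
        # block contains a duplicate transaction.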
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
b57 = self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx), 3)
b56 = self.update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
self.send_blocks([b56], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
b57p2 = self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
tx3 = self.create_tx(tx2, 0, 1)
tx4 = self.create_tx(tx3, 0, 1)
b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx), 6)
b56p2 = self.update_block("b56p2", [tx3, tx4])
self.send_blocks([b56p2], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
self.move_tip("57p2")
self.send_blocks([b57p2], True)
self.move_tip(57)
self.send_blocks([b57], False) # The tip is not updated because 57p2 seen first
self.save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 ()
# \-> ??? (17)
#
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
b58 = self.next_block(58, spend=out[17])
tx = CTransaction()
assert len(out[17].vout) < 42
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = self.update_block(58, [tx])
self.send_blocks([b58], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
b59 = self.next_block(59)
tx = self.create_and_sign_transaction(out[17], 51 * COIN)
b59 = self.update_block(59, [tx])
self.send_blocks([b59], success=False, reject_reason='block-validation-failed', reconnect=True)
# reset to good chain
self.move_tip(57)
b60 = self.next_block(60)
self.send_blocks([b60], True)
self.save_spendable_output()
# Test BIP30 (reject duplicate)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 ()
# \-> b61 ()
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected. See also CVE-2012-1909.
#
self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
self.move_tip(60)
b61 = self.next_block(61)
b61.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG
b61.vtx[0].rehash()
b61 = self.update_block(61, [])
assert_equal(duplicate_tx.serialize(), b61.vtx[0].serialize())
self.send_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True)
# Test BIP30 (allow duplicate if spent)
#
# -> b57 (16) -> b60 ()
# \-> b_spend_dup_cb (b_dup_cb) -> b_dup_2 ()
#
self.move_tip(57)
b_spend_dup_cb = self.next_block('spend_dup_cb')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(duplicate_tx.sha256, 0)))
tx.vout.append(CTxOut(duplicate_tx.vout[0].nValue, CScript([OP_TRUE])))
self.sign_tx(tx, duplicate_tx)
tx.rehash()
b_spend_dup_cb = self.update_block('spend_dup_cb', [tx])
b_dup_2 = self.next_block('dup_2')
b_dup_2.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG
b_dup_2.vtx[0].rehash()
b_dup_2 = self.update_block('dup_2', [])
assert_equal(duplicate_tx.serialize(), b_dup_2.vtx[0].serialize())
assert_equal(self.nodes[0].gettxout(txid=duplicate_tx.hash, n=0)['confirmations'], 119)
self.send_blocks([b_spend_dup_cb, b_dup_2], success=True)
# The duplicate has less confirmations
assert_equal(self.nodes[0].gettxout(txid=duplicate_tx.hash, n=0)['confirmations'], 1)
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 ()
# \-> b62 (18)
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip('dup_2')
b62 = self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert tx.vin[0].nSequence < 0xffffffff
tx.calc_sha256()
b62 = self.update_block(62, [tx])
self.send_blocks([b62], success=False, reject_reason='bad-txns-nonfinal', reconnect=True)
# Test a non-final coinbase is also rejected
#
# -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 ()
# \-> b63 (-)
#
self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
self.move_tip('dup_2')
b63 = self.next_block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = self.update_block(63, [])
self.send_blocks([b63], success=False, reject_reason='bad-txns-nonfinal', reconnect=True)
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
        # b64 is a good block (same as b64a but w/ canonical varint)
#
self.log.info("Accept a valid block even if a bloated version of the block has previously been sent")
self.move_tip('dup_2')
regular_block = self.next_block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
#script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 149
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
tx.vout.append(CTxOut(b64a.vtx[1].vout[0].nValue.getAmount() - 0)) # fee
b64a = self.update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()')
# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
# resend the header message, it won't send us the getdata message again. Just
# disconnect and reconnect and then call sync_blocks.
# TODO: improve this test to be less dependent on P2P DOS behaviour.
node.disconnect_p2ps()
self.reconnect_p2p()
self.move_tip('dup_2')
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.send_blocks([b64], True)
self.save_spendable_output()
# Spend an output created in the block itself
#
# -> b_dup_2 () -> b64 (18) -> b65 (19)
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
b65 = self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue.getAmount())
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
tx2 = self.create_and_sign_transaction(tx1, 1)
b65 = self.update_block(65, [tx1, tx2])
self.send_blocks([b65], True)
self.save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b64 (18) -> b65 (19)
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
b66 = self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue.getAmount())
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
self.send_blocks([b66], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to double-spend a transaction created in a block
#
# -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
self.log.info("Reject a block with a transaction double spending a transaction created in the same block")
self.move_tip(65)
b67 = self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue.getAmount())
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
b67 = self.update_block(67, [tx1, tx2, tx3])
self.send_blocks([b67], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# More tests of block subsidy
#
# -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
b68 = self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue.getAmount() - 9)
b68 = self.update_block(68, [tx])
self.send_blocks([b68], success=False, reject_reason='bad-cb-amount', reconnect=True)
self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
self.move_tip(65)
b69 = self.next_block(69, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue.getAmount() - 10)
self.update_block(69, [tx])
self.send_blocks([b69], True)
self.save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
b70 = self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
b70 = self.update_block(70, [tx])
self.send_blocks([b70], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
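        # Why the hashes match (background): when a merkle level has an odd number
        # of entries the last hash is duplicated, so leaves [cb, tx1, tx2] and
        # [cb, tx1, tx2, tx2] both pair up as (cb, tx1), (tx2, tx2) and yield the
        # same root (CVE-2012-2459). b71 therefore shares b72's hash while carrying
        # an invalid duplicate transaction.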
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
b72 = self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
self.move_tip(71)
self.send_blocks([b71], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
self.move_tip(72)
self.send_blocks([b72], True)
self.save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
        # bytearray[20,004-20,524]: unread data (script_element)
        # bytearray[20,525] : OP_CHECKSIG (this puts us over the limit)
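        # Size check (illustrative, assuming MAX_BLOCK_SIGOPS = 20,000 and
        # MAX_SCRIPT_ELEMENT_SIZE = 520): 20,000 - 1 + 520 + 1 + 5 + 1 = 20,526
        # bytes total, i.e. 19,999 leading OP_CHECKSIGs, a 5-byte OP_PUSHDATA4
        # header declaring a 521-byte push (one over the element limit), 521 bytes
        # of push data, and the one trailing OP_CHECKSIG.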
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
b73 = self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
a[MAX_BLOCK_SIGOPS + 2] = 0
a[MAX_BLOCK_SIGOPS + 3] = 0
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b73 = self.update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
self.send_blocks([b73], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
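        # Background: the OP_PUSHDATA4 header claims an element of roughly 4 GB that
        # is never supplied, so script parsing stops at the header; the legacy sigop
        # counter therefore only sees the OP_CHECKSIGs placed before it, which is
        # what makes b74 land just over the limit and b75 just under it.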
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
b74 = self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS + 1] = 0xfe
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
a[MAX_BLOCK_SIGOPS + 4] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b74 = self.update_block(74, [tx])
self.send_blocks([b74], success=False, reject_reason='bad-blk-sigops', reconnect=True)
self.move_tip(72)
b75 = self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS + 1] = 0xff
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b75 = self.update_block(75, [tx])
self.send_blocks([b75], True)
self.save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = self.create_and_sign_transaction(out[23], 1, CScript(a))
b76 = self.update_block(76, [tx])
self.send_blocks([b76], True)
self.save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
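        # Background on low-S (informational): for a valid ECDSA signature (r, s),
        # (r, n - s) is equally valid; standardness policy only accepts the low-S
        # form (s <= n/2). The framework's signer does not enforce this, so signed
        # spends would be rejected by the mempool roughly half the time, hence the
        # unsigned, anyone-can-spend OP_TRUE outputs used below.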
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
b77 = self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.send_blocks([b77], True)
self.save_spendable_output()
b78 = self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.send_blocks([b78], True)
b79 = self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.send_blocks([b79], True)
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.move_tip(77)
b80 = self.next_block(80, spend=out[25])
self.send_blocks([b80], False, force_send=True)
self.save_spendable_output()
b81 = self.next_block(81, spend=out[26])
self.send_blocks([b81], False, force_send=True) # other chain is same length
self.save_spendable_output()
b82 = self.next_block(82, spend=out[27])
self.send_blocks([b82], True) # now this chain is longer, triggers re-org
self.save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert tx78.hash in mempool
assert tx79.hash in mempool
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
b83 = self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue.getAmount(), script)
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
tx2 = self.create_and_sign_transaction(tx1, 1, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
b83 = self.update_block(83, [tx1, tx2])
self.send_blocks([b83], True)
self.save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
b84 = self.next_block(84)
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
tx1 = self.create_tx(out[29], 0, 1, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(1, CScript([OP_TRUE])))
self.update_fee(tx1, out[29], 0)
tx1.calc_sha256()
self.sign_tx(tx1, out[29])
tx1.rehash()
tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
self.update_fee(tx2, tx1, 1)
tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
tx3.vout.append(CTxOut(1, CScript([OP_TRUE])))
self.update_fee(tx3, tx1, 2)
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
tx4 = self.create_tx(tx1, 3, 1, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
self.update_fee(tx4, tx1, 3)
tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
self.send_blocks([b84], True)
self.save_spendable_output()
self.move_tip(83)
b85 = self.next_block(85, spend=out[29])
self.send_blocks([b85], False) # other chain is same length
b86 = self.next_block(86, spend=out[30])
self.send_blocks([b86], True)
self.move_tip(84)
b87 = self.next_block(87, spend=out[30])
self.send_blocks([b87], False) # other chain is same length
self.save_spendable_output()
b88 = self.next_block(88, spend=out[31])
self.send_blocks([b88], True)
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
b89a = self.next_block("89a", spend=out[32])
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
tx = self.create_tx(tx1, 0, 1, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.send_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")
self.move_tip(88)
LARGE_REORG_SIZE = 1088
blocks = []
spend = out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend)
tx = CTransaction()
#script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 149
# ELEMENTS: doesn't support 0 value for non-OP_RETURN
script_output = CScript([OP_RETURN, b'\x00' * (script_length-1)])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
self.update_fee(tx, b.vtx[1], 0)
b = self.update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
self.send_blocks(blocks, True, timeout=2440)
chain1_tip = i
# now create alt chain of same length
self.move_tip(88)
blocks2 = []
for i in range(89, LARGE_REORG_SIZE + 89):
blocks2.append(self.next_block("alt" + str(i)))
self.send_blocks(blocks2, False, force_send=True)
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1))
self.send_blocks([block], True, timeout=2440)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1)
self.send_blocks([block], False, force_send=True)
block = self.next_block(chain1_tip + 2)
self.send_blocks([block], True, timeout=2440)
self.log.info("Reject a block with an invalid block header version")
b_v1 = self.next_block('b_v1', version=1)
self.send_blocks([b_v1], success=False, force_send=True, reject_reason='bad-version(0x00000001)', reconnect=True)
self.move_tip(chain1_tip + 2)
b_cb34 = self.next_block('b_cb34')
b_cb34.vtx[0].vin[0].scriptSig = b_cb34.vtx[0].vin[0].scriptSig[:-1]
b_cb34.vtx[0].rehash()
b_cb34.hashMerkleRoot = b_cb34.calc_merkle_root()
b_cb34.solve()
self.send_blocks([b_cb34], success=False, reject_reason='bad-cb-height', reconnect=True)
# Helper methods
################
def add_transactions_to_block(self, block, tx_list):
[tx.rehash() for tx in tx_list]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
fee = spend_tx.vout[n].nValue.getAmount() - value
return create_tx_with_script(spend_tx, n, amount=value, fee=fee, script_pub_key=script)
# update the fee output amount and also move it to the end
def update_fee(self, tx, prev_tx, prev_n):
total_out = 0
for i in reversed(range(len(tx.vout))):
if tx.vout[i].is_fee():
del tx.vout[i]
else:
total_out += tx.vout[i].nValue.getAmount()
fee = prev_tx.vout[prev_n].nValue.getAmount() - total_out
if fee > 0:
tx.vout.append(CTxOut(fee))
tx.calc_sha256()
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = LegacySignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), *, version=4):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue.setToAmount(coinbase.vout[0].nValue.getAmount() + additional_coinbase_value)
coinbase.rehash()
if spend is None:
block = create_block(base_block_hash, coinbase, block_time, version=version)
else:
coinbase.vout[0].nValue.setToAmount(coinbase.vout[0].nValue.getAmount() + spend.vout[0].nValue.getAmount() - 1) # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time, version=version)
tx = self.create_tx(spend, 0, 1, script) # spend 1 satoshi
self.sign_tx(tx, spend)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
# Block is created. Find a valid nonce.
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
# save the current tip so it can be spent by a later block
def save_spendable_output(self):
self.log.debug("saving spendable output %s" % self.tip.vtx[0])
self.spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output(self):
self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0])
return self.spendable_outputs.pop(0).vtx[0]
# move the tip back to a previous block
def move_tip(self, number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(self, block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
def bootstrap_p2p(self, timeout=10):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.helper_peer = self.nodes[0].add_p2p_connection(P2PDataStore())
# We need to wait for the initial getheaders from the peer before we
# start populating our blockstore. If we don't, then we may run ahead
# to the next subtest before we receive the getheaders. We'd then send
# an INV for the next block and receive two getheaders - one for the
# IBD and one for the INV. We'd respond to both and could get
# unexpectedly disconnected if the DoS score for that error is 50.
self.helper_peer.wait_for_getheaders(timeout=timeout)
def reconnect_p2p(self, timeout=60):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(timeout=timeout)
def send_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=960):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.helper_peer.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
if reconnect:
self.reconnect_p2p(timeout=timeout)
if __name__ == '__main__':
FullBlockTest().main()
| mit | 7,900,168,660,383,633,000 | 45.314993 | 183 | 0.574693 | false |
bradleyayers/django-tokenfield | docs/conf.py | 1 | 7072 | # -*- coding: utf-8 -*-
#
# django-tokenfield documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 13 13:21:28 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-tokenfield'
copyright = u'2011, Bradley Ayers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0.dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-tokenfielddoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-tokenfield.tex', u'django-tokenfield Documentation',
u'Bradley Ayers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-tokenfield', u'django-tokenfield Documentation',
[u'Bradley Ayers'], 1)
]
| bsd-2-clause | 3,945,631,780,351,700,500 | 31.740741 | 80 | 0.710124 | false |
pathumego/nototools | nototools/tool_utils.py | 2 | 7212 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some common utilities for tools to use."""
import contextlib
import datetime
import glob
import os
import os.path as path
import re
import shutil
import subprocess
import time
import zipfile
from nototools import notoconfig
@contextlib.contextmanager
def temp_chdir(path):
"""Usage: with temp_chdir(path):
do_something
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(saved_dir)
noto_re = re.compile(r'\[(tools|fonts|emoji|cjk)\]/(.*)')
def resolve_path(path):
"""Resolve a path that might use noto path shorthand."""
if not path or path == '-':
return None
m = noto_re.match(path)
if m:
base, rest = m.groups()
key = 'noto_' + base
if not key in notoconfig.values:
raise ValueError('notoconfig has no entry for %s' % key)
base = notoconfig.values.get(key)
path = os.path.join(base, rest)
path = os.path.expanduser(path)
path = os.path.abspath(path)
path = os.path.realpath(path)
return path
def check_dir_exists(dirpath):
if not os.path.isdir(dirpath):
raise ValueError('%s does not exist or is not a directory' % dirpath)
def check_file_exists(filepath):
if not os.path.isfile(filepath):
raise ValueError('%s does not exist or is not a file' % filepath)
def ensure_dir_exists(path):
path = os.path.realpath(path)
if not os.path.isdir(path):
if os.path.exists(path):
raise ValueError('%s exists and is not a directory' % path)
print "making '%s'" % path
os.makedirs(path)
return path
def generate_zip_with_7za(root_dir, file_paths, archive_path):
"""file_paths is a list of files relative to root_dir, these will be the names
in the archive at archive_path."""
arg_list = ['7za', 'a', archive_path, '-tzip', '-mx=7', '-bd', '--']
arg_list.extend(file_paths)
with temp_chdir(root_dir):
# capture but discard output
subprocess.check_output(arg_list)
def generate_zip_with_7za_from_filepairs(pairs, archive_path):
"""Pairs are source/destination path pairs. The source will be put into the
zip with name destination. The destination must be a suffix of the source."""
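  # Example (illustrative): the pair ('/work/fonts/NotoSans-Regular.ttf',
  # 'fonts/NotoSans-Regular.ttf') is zipped in place because the destination is a
  # suffix of the source; a pair whose destination is not a suffix is first copied
  # into the staging directory under the destination name and zipped from there.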
staging_dir = '/tmp/stage_7za'
if os.path.exists(staging_dir):
shutil.rmtree(staging_dir)
os.makedirs(staging_dir)
pair_map = {}
for source, dest in pairs:
if not source.endswith(dest):
staging_source = os.path.join(staging_dir, dest)
shutil.copyfile(source, staging_source)
source_root = staging_dir
else:
source_root = source[:-len(dest)]
if source_root not in pair_map:
pair_map[source_root] = set()
pair_map[source_root].add(dest)
for source_root, dest_set in pair_map.iteritems():
generate_zip_with_7za(source_root, sorted(dest_set), archive_path)
def dos2unix(root_dir, glob_list):
"""Convert dos line endings to unix ones in place."""
with temp_chdir(root_dir):
for g in glob_list:
file_list = glob.glob(g)
if file_list:
subprocess.check_call(['dos2unix', '-k', '-q', '-o'] + file_list)
def zip_extract_with_timestamp(zippath, dstdir):
zip = zipfile.ZipFile(zippath)
with temp_chdir(dstdir):
for info in zip.infolist():
zip.extract(info.filename)
# of course, time zones mess this up, so don't expect precision
date_time = time.mktime(info.date_time + (0, 0, -1))
os.utime(info.filename, (date_time, date_time))
def git_mv(repo, old, new):
"""Rename old to new in repo"""
with temp_chdir(repo):
return subprocess.check_output(
['git', 'mv', old, new])
def git_file_lastlog(repo, filepath):
"""Return a string containing the short hash, date, author email, and title
of most recent commit of filepath, separated by tab."""
with temp_chdir(repo):
return subprocess.check_output(
['git', 'log', '-n', '1', '--format=%h\t%ad\t%ae\t%s', '--date=short',
'--', filepath])
def get_tool_generated(repo, subdir, commit_title_prefix='Updated by tool'):
"""
Return a list of the names of tool-generated files in the provided directory.
The idea is that when we check in files that are generated by a tool, the
  commit will start with the given prefix. If a file's most recent log entry
matches this, it means that we've not applied patches or fixes to the file
since it was generated, so we can overwrite it with new tool-generated data.
  The motivation for this is maintaining the sample texts. The original source
for most of these is UDHR data, but subsequently we have fixed things in
some of the samples. We generally do not want to blindly overwrite these
fixes, but do want to be able to regenerate the samples if we get new source
data.
"""
tool_generated_files = []
for f in os.listdir(path.join(repo, subdir)):
relpath = path.join(subdir, f)
commit, date, author, title = git_file_lastlog(repo, relpath).split('\t')
if title.startswith(commit_title_prefix):
tool_generated_files.append(f)
return tool_generated_files
def git_get_branch(repo):
with temp_chdir(repo):
return subprocess.check_output(['git', 'symbolic-ref', '--short', 'HEAD']).strip()
def git_is_clean(repo):
"""Ensure there are no unstaged or uncommitted changes in the repo."""
result = True
with temp_chdir(repo):
subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules', '--refresh'])
if subprocess.call(['git', 'diff-files', '--quiet', '--ignore-submodules', '--']):
print 'There are unstaged changes in the noto branch:'
subprocess.call(['git', 'diff-files', '--name-status', '-r', '--ignore-submodules', '--'])
result = False
if subprocess.call(
['git', 'diff-index', '--cached', '--quiet', 'HEAD', '--ignore-submodules', '--']):
print 'There are uncommitted changes in the noto branch:'
subprocess.call(
['git', 'diff-index', '--cached', '--name-status', '-r', 'HEAD', '--ignore-submodules', '--'])
result = False
return result
def git_add_all(repo_subdir):
"""Add all changed, deleted, and new files in subdir to the staging area."""
# git can now add everything, even removed files
with temp_chdir(repo_subdir):
subprocess.check_call(['git', 'add', '--', '.'])
def svn_get_version(repo):
with temp_chdir(repo):
version_string = subprocess.check_output(['svnversion', '-c']).strip()
colon_index = version_string.find(':')
if colon_index >= 0:
version_string = version_string[colon_index + 1:]
return version_string
def svn_update(repo):
with temp_chdir(repo):
subprocess.check_call(['svn', 'up'], stderr=subprocess.STDOUT)
| apache-2.0 | 684,536,509,520,578,000 | 32.082569 | 102 | 0.674432 | false |
skidzo/sympy | bin/generate_test_list.py | 37 | 1675 | """
Execute like this:
$ python bin/generate_test_list.py
tests = [
'sympy.concrete.tests',
'sympy.core.tests',
'sympy.functions.combinatorial.tests',
'sympy.functions.elementary.tests',
'sympy.functions.special.tests',
'sympy.geometry.tests',
'sympy.integrals.tests',
'sympy.matrices.tests',
'sympy.ntheory.tests',
'sympy.numerics.tests',
'sympy.parsing.tests',
'sympy.physics.tests',
'sympy.plotting.tests',
'sympy.polynomials.tests',
'sympy.printing.tests',
'sympy.series.tests',
'sympy.simplify.tests',
'sympy.solvers.tests',
'sympy.specfun.tests',
'sympy.test_external',
'sympy.utilities.tests',
]
"""
from __future__ import print_function
from glob import glob
def get_paths(level=15):
"""
    Generates a set of paths used to search for test files.
Examples
========
>>> get_paths(2)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
>>> get_paths(6)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py']
"""
wildcards = ["/"]
for i in range(level):
wildcards.append(wildcards[-1] + "*/")
p = ["sympy" + x + "test_*.py" for x in wildcards]
return p
def generate_test_list():
g = []
for x in get_paths():
g.extend(glob(x))
g = [".".join(x.split("/")[:-1]) for x in g]
g = list(set(g))
g.sort()
return g
if __name__ == '__main__':
g = generate_test_list()
print("tests = [")
for x in g:
print(" '%s'," % x)
print("]")
| bsd-3-clause | 4,903,072,874,808,780,000 | 22.591549 | 67 | 0.549254 | false |
rmnl/htg | htg/migrate/__init__.py | 1 | 1114 | import click
from ..classes import Config
from ..migrate.in2ex import in2ex as in2exfunc
from ..utils.aliased_group import AliasedGroup
@click.group(
'migrate',
cls=AliasedGroup,
help="Migration functions to help with upgrades",
epilog="Use \"htg utils migrate COMMAND --help\" for help with "
"subcommands.",
)
def migrate(*args, **kwargs):
pass
@click.command('in2ex', help="Move from include to exclude in meta_file.")
@click.option(
'--directory', '-d',
required=False,
metavar="DIR",
multiple=True,
help='The in2ex will default to the original_photos_dirs setting '
'in the config file if you do not provide root directories.',
type=click.Path(exists=True, file_okay=False, resolve_path=True),
)
@click.argument(
'config-file',
required=False,
type=click.Path(exists=True, dir_okay=False, resolve_path=True)
)
def in2ex(config_file=None, directory=[], include_all=False):
config = Config(config_file)
dirs = directory if directory else config.original_photos_dirs
in2exfunc(config, dirs)
migrate.add_command(in2ex)
| mit | -1,996,822,145,365,975,300 | 27.564103 | 74 | 0.693896 | false |
prakritish/ansible | lib/ansible/modules/cloud/openstack/os_stack.py | 16 | 9224 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <[email protected]>
# (c) 2016, Steve Baker <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
description:
    - Add or remove a stack in OpenStack Heat
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
name:
description:
        - Name of the stack that should be created; the name may only contain letters and digits, with no spaces
required: true
template:
description:
- Path of the template file to use for the stack creation
required: false
default: None
environment:
description:
- List of environment files that should be used for the stack creation
required: false
default: None
parameters:
description:
- Dictionary of parameters for the stack creation
required: false
default: None
rollback:
description:
- Rollback stack creation
required: false
default: false
timeout:
description:
- Maximum number of seconds to wait for the stack creation
required: false
default: 3600
availability_zone:
description:
        - Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
stack:
action:
description: Action, could be Create or Update.
type: string
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: string
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: string
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: string
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: string
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: string
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from time import sleep
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _create_stack(module, stack, cloud):
try:
stack = cloud.create_stack(module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
            module.fail_json(msg="Failure in creating stack: %s" % stack)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _update_stack(module, stack, cloud):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg = "Failure in updating stack: %s" %
stack['stack_status_reason'])
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
template=dict(default=None),
environment=dict(default=None, type='list'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# stack API introduced in 1.8.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
module.fail_json(msg='shade 1.8.0 or higher is required for this module')
state = module.params['state']
name = module.params['name']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['template']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
try:
cloud = shade.openstack_cloud(**module.params)
stack = cloud.get_stack(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, stack,
cloud))
if state == 'present':
if not stack:
stack = _create_stack(module, stack, cloud)
else:
stack = _update_stack(module, stack, cloud)
changed = True
module.exit_json(changed=changed,
stack=stack,
id=stack.id)
elif state == 'absent':
if not stack:
changed = False
else:
changed = True
if not cloud.delete_stack(name, wait=module.params['wait']):
module.fail_json(msg='delete stack failed for stack: %s' % name)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | -7,856,817,435,298,525,000 | 32.787546 | 109 | 0.588248 | false |
rven/odoo | addons/mail/models/mail_mail.py | 3 | 21571 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import base64
import datetime
import logging
import psycopg2
import smtplib
import threading
import re
from collections import defaultdict
from odoo import _, api, fields, models
from odoo import tools
from odoo.addons.base.models.ir_mail_server import MailDeliveryException
_logger = logging.getLogger(__name__)
class MailMail(models.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
# content
mail_message_id = fields.Many2one('mail.message', 'Message', required=True, ondelete='cascade', index=True, auto_join=True)
body_html = fields.Text('Rich-text Contents', help="Rich-text/HTML message")
references = fields.Text('References', help='Message references, such as identifiers of previous messages', readonly=1)
headers = fields.Text('Headers', copy=False)
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
notification = fields.Boolean('Is Notification', help='Mail has been created to notify people of an existing mail.message')
# recipients: include inactive partners (they may have been archived after
# the message was sent, but they should remain visible in the relation)
email_to = fields.Text('To', help='Message recipients (emails)')
email_cc = fields.Char('Cc', help='Carbon copy message recipients')
recipient_ids = fields.Many2many('res.partner', string='To (Partners)',
context={'active_test': False})
# process
state = fields.Selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False, default='outgoing')
auto_delete = fields.Boolean(
'Auto Delete',
help="This option permanently removes any track of email after it's been sent, including from the Technical menu in the Settings, in order to preserve storage space of your Odoo database.")
failure_reason = fields.Text(
'Failure Reason', readonly=1,
help="Failure reason. This is usually the exception thrown by the email server, stored to ease the debugging of mailing issues.")
scheduled_date = fields.Char('Scheduled Send Date',
help="If set, the queue manager will send the email after the date. If not set, the email will be send as soon as possible.")
@api.model_create_multi
def create(self, values_list):
# notification field: if not set, set if mail comes from an existing mail.message
for values in values_list:
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
new_mails = super(MailMail, self).create(values_list)
new_mails_w_attach = self
for mail, values in zip(new_mails, values_list):
if values.get('attachment_ids'):
new_mails_w_attach += mail
if new_mails_w_attach:
new_mails_w_attach.mapped('attachment_ids').check(mode='read')
return new_mails
def write(self, vals):
res = super(MailMail, self).write(vals)
if vals.get('attachment_ids'):
for mail in self:
mail.attachment_ids.check(mode='read')
return res
def unlink(self):
# cascade-delete the parent message for all mails that are not created for a notification
mail_msg_cascade_ids = [mail.mail_message_id.id for mail in self if not mail.notification]
res = super(MailMail, self).unlink()
if mail_msg_cascade_ids:
self.env['mail.message'].browse(mail_msg_cascade_ids).unlink()
return res
@api.model
def default_get(self, fields):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if self._context.get('default_type') not in type(self).message_type.base_field.selection:
self = self.with_context(dict(self._context, default_type=None))
return super(MailMail, self).default_get(fields)
def mark_outgoing(self):
return self.write({'state': 'outgoing'})
def cancel(self):
return self.write({'state': 'cancel'})
@api.model
def process_email_queue(self, ids=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
filters = ['&',
('state', '=', 'outgoing'),
'|',
('scheduled_date', '<', datetime.datetime.now()),
('scheduled_date', '=', False)]
if 'filters' in self._context:
filters.extend(self._context['filters'])
# TODO: make limit configurable
filtered_ids = self.search(filters, limit=10000).ids
if not ids:
ids = filtered_ids
else:
ids = list(set(filtered_ids) & set(ids))
ids.sort()
res = None
try:
# auto-commit except in testing mode
auto_commit = not getattr(threading.currentThread(), 'testing', False)
res = self.browse(ids).send(auto_commit=auto_commit)
except Exception:
_logger.exception("Failed processing mail queue")
return res
def _postprocess_sent_message(self, success_pids, failure_reason=False, failure_type=None):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:return: True
"""
notif_mails_ids = [mail.id for mail in self if mail.notification]
if notif_mails_ids:
notifications = self.env['mail.notification'].search([
('notification_type', '=', 'email'),
('mail_id', 'in', notif_mails_ids),
('notification_status', 'not in', ('sent', 'canceled'))
])
if notifications:
# find all notification linked to a failure
failed = self.env['mail.notification']
if failure_type:
failed = notifications.filtered(lambda notif: notif.res_partner_id not in success_pids)
(notifications - failed).sudo().write({
'notification_status': 'sent',
'failure_type': '',
'failure_reason': '',
})
if failed:
failed.sudo().write({
'notification_status': 'exception',
'failure_type': failure_type,
'failure_reason': failure_reason,
})
messages = notifications.mapped('mail_message_id').filtered(lambda m: m.is_thread_message())
# TDE TODO: could be great to notify message-based, not notifications-based, to lessen number of notifs
messages._notify_message_notification_update() # notify user that we have a failure
if not failure_type or failure_type == 'RECIPIENT': # if we have another error, we want to keep the mail.
mail_to_delete_ids = [mail.id for mail in self if mail.auto_delete]
self.browse(mail_to_delete_ids).sudo().unlink()
return True
# ------------------------------------------------------
# mail_mail formatting, tools and send mechanism
# ------------------------------------------------------
def _send_prepare_body(self):
"""Return a specific ir_email body. The main purpose of this method
is to be inherited to add custom content depending on some module."""
self.ensure_one()
return self.body_html or ''
def _send_prepare_values(self, partner=None):
"""Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param Model partner: specific recipient partner
"""
self.ensure_one()
body = self._send_prepare_body()
body_alternative = tools.html2plaintext(body)
if partner:
email_to = [tools.formataddr((partner.name or 'False', partner.email or 'False'))]
else:
email_to = tools.email_split_and_format(self.email_to)
res = {
'body': body,
'body_alternative': body_alternative,
'email_to': email_to,
}
return res
def _split_by_server(self):
"""Returns an iterator of pairs `(mail_server_id, record_ids)` for current recordset.
The same `mail_server_id` may repeat in order to limit batch size according to
the `mail.session.batch.size` system parameter.
"""
groups = defaultdict(list)
# Turn prefetch OFF to avoid MemoryError on very large mail queues, we only care
# about the mail server ids in this case.
for mail in self.with_context(prefetch_fields=False):
groups[mail.mail_server_id.id].append(mail.id)
sys_params = self.env['ir.config_parameter'].sudo()
batch_size = int(sys_params.get_param('mail.session.batch.size', 1000))
for server_id, record_ids in groups.items():
for mail_batch in tools.split_every(batch_size, record_ids):
yield server_id, mail_batch
def send(self, auto_commit=False, raise_exception=False):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
that fail to be deliver are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
for server_id, batch_ids in self._split_by_server():
smtp_session = None
try:
smtp_session = self.env['ir.mail_server'].connect(mail_server_id=server_id)
except Exception as exc:
if raise_exception:
# To be consistent and backward compatible with mail_mail.send() raised
# exceptions, it is encapsulated into an Odoo MailDeliveryException
raise MailDeliveryException(_('Unable to connect to SMTP Server'), exc)
else:
batch = self.browse(batch_ids)
batch.write({'state': 'exception', 'failure_reason': exc})
batch._postprocess_sent_message(success_pids=[], failure_type="SMTP")
else:
self.browse(batch_ids)._send(
auto_commit=auto_commit,
raise_exception=raise_exception,
smtp_session=smtp_session)
_logger.info(
'Sent batch %s emails via mail server ID #%s',
len(batch_ids), server_id)
finally:
if smtp_session:
smtp_session.quit()
def _send(self, auto_commit=False, raise_exception=False, smtp_session=None):
IrMailServer = self.env['ir.mail_server']
IrAttachment = self.env['ir.attachment']
for mail_id in self.ids:
success_pids = []
failure_type = None
processing_pid = None
mail = None
try:
mail = self.browse(mail_id)
if mail.state != 'outgoing':
if mail.state != 'exception' and mail.auto_delete:
mail.sudo().unlink()
continue
# remove attachments if user send the link with the access_token
body = mail.body_html or ''
attachments = mail.attachment_ids
for link in re.findall(r'/web/(?:content|image)/([0-9]+)', body):
attachments = attachments - IrAttachment.browse(int(link))
# load attachment binary data with a separate read(), as prefetching all
# `datas` (binary field) could bloat the browse cache, triggerring
# soft/hard mem limits with temporary data.
attachments = [(a['name'], base64.b64decode(a['datas']), a['mimetype'])
for a in attachments.sudo().read(['name', 'datas', 'mimetype']) if a['datas'] is not False]
# specific behavior to customize the send email for notified partners
email_list = []
if mail.email_to:
email_list.append(mail._send_prepare_values())
for partner in mail.recipient_ids:
values = mail._send_prepare_values(partner=partner)
values['partner_id'] = partner
email_list.append(values)
# headers
headers = {}
ICP = self.env['ir.config_parameter'].sudo()
bounce_alias = ICP.get_param("mail.bounce.alias")
catchall_domain = ICP.get_param("mail.catchall.domain")
if bounce_alias and catchall_domain:
if mail.mail_message_id.is_thread_message():
headers['Return-Path'] = '%s+%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)
else:
headers['Return-Path'] = '%s+%d@%s' % (bounce_alias, mail.id, catchall_domain)
if mail.headers:
try:
headers.update(ast.literal_eval(mail.headers))
except Exception:
pass
# Writing on the mail object may fail (e.g. lock on user) which
# would trigger a rollback *after* actually sending the email.
# To avoid sending twice the same email, provoke the failure earlier
mail.write({
'state': 'exception',
                    'failure_reason': _('Error without exception. Probably due to sending an email without computed recipients.'),
})
# Update notification in a transient exception state to avoid concurrent
# update in case an email bounces while sending all emails related to current
# mail record.
notifs = self.env['mail.notification'].search([
('notification_type', '=', 'email'),
('mail_id', 'in', mail.ids),
('notification_status', 'not in', ('sent', 'canceled'))
])
if notifs:
                    notif_msg = _('Error without exception. Probably due to concurrent access update of notification records. Please contact an administrator.')
notifs.sudo().write({
'notification_status': 'exception',
'failure_type': 'UNKNOWN',
'failure_reason': notif_msg,
})
# `test_mail_bounce_during_send`, force immediate update to obtain the lock.
# see rev. 56596e5240ef920df14d99087451ce6f06ac6d36
notifs.flush(fnames=['notification_status', 'failure_type', 'failure_reason'], records=notifs)
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = IrMailServer.build_email(
email_from=mail.email_from,
email_to=email.get('email_to'),
subject=mail.subject,
body=email.get('body'),
body_alternative=email.get('body_alternative'),
email_cc=tools.email_split(mail.email_cc),
reply_to=mail.reply_to,
attachments=attachments,
message_id=mail.message_id,
references=mail.references,
object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype='html',
subtype_alternative='plain',
headers=headers)
processing_pid = email.pop("partner_id", None)
try:
res = IrMailServer.send_email(
msg, mail_server_id=mail.mail_server_id.id, smtp_session=smtp_session)
if processing_pid:
success_pids.append(processing_pid)
processing_pid = None
except AssertionError as error:
if str(error) == IrMailServer.NO_VALID_RECIPIENT:
failure_type = "RECIPIENT"
# No valid recipient found for this particular
# mail item -> ignore error to avoid blocking
# delivery to next recipients, if any. If this is
# the only recipient, the mail will show as failed.
_logger.info("Ignoring invalid recipients for mail.mail %s: %s",
mail.message_id, email.get('email_to'))
else:
raise
                if res: # mail has been sent at least once, no major exception occurred
mail.write({'state': 'sent', 'message_id': res, 'failure_reason': False})
_logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:[email protected] in 6.1
mail._postprocess_sent_message(success_pids=success_pids, failure_type=failure_type)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
_logger.exception(
'MemoryError while processing mail with ID %r and Msg-Id %r. Consider raising the --limit-memory-hard startup option',
mail.id, mail.message_id)
                # mail status will stay ongoing since the transaction will be rolled back
raise
except (psycopg2.Error, smtplib.SMTPServerDisconnected):
# If an error with the database or SMTP session occurs, chances are that the cursor
# or SMTP session are unusable, causing further errors when trying to save the state.
_logger.exception(
'Exception while processing mail with ID %r and Msg-Id %r.',
mail.id, mail.message_id)
raise
except Exception as e:
failure_reason = tools.ustr(e)
_logger.exception('failed sending mail (id: %s) due to %s', mail.id, failure_reason)
mail.write({'state': 'exception', 'failure_reason': failure_reason})
mail._postprocess_sent_message(success_pids=success_pids, failure_reason=failure_reason, failure_type='UNKNOWN')
if raise_exception:
if isinstance(e, (AssertionError, UnicodeEncodeError)):
if isinstance(e, UnicodeEncodeError):
value = "Invalid text: %s" % e.object
else:
value = '. '.join(e.args)
raise MailDeliveryException(value)
raise
if auto_commit is True:
self._cr.commit()
return True
| agpl-3.0 | 2,610,351,839,287,534,000 | 49.755294 | 197 | 0.563581 | false |
blowmage/gcloud-python | gcloud/datastore/_testing.py | 1 | 1234 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared datastore testing utilities."""
from gcloud._testing import _Monkey
from gcloud.datastore import _implicit_environ
from gcloud.datastore._implicit_environ import _DefaultsContainer
def _monkey_defaults(*args, **kwargs):
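    # Builds a mock defaults container and returns a _Monkey patcher that
    # temporarily swaps it in for the datastore's implicit defaults.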
mock_defaults = _DefaultsContainer(*args, **kwargs)
return _Monkey(_implicit_environ, _DEFAULTS=mock_defaults)
def _setup_defaults(test_case, *args, **kwargs):
test_case._replaced_defaults = _implicit_environ._DEFAULTS
_implicit_environ._DEFAULTS = _DefaultsContainer(*args, **kwargs)
def _tear_down_defaults(test_case):
_implicit_environ._DEFAULTS = test_case._replaced_defaults
| apache-2.0 | -1,639,224,862,537,679,400 | 36.393939 | 74 | 0.753647 | false |
MaineKuehn/workshop-collaborative_software | gksolite/plain.py | 1 | 1801 | class ListGol(object):
"""
Game of Life implemented via lists
:param board: the initial state of the board
:type board: :py:class:`gksol.boards.PaddedBoard` or List[List[int]]
.. describe:: gol[n]
Return the ``n``'th row of the board as a list-like view.
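    Example (a 3x3 "blinker" board, shown purely as an illustration)::
        >>> gol = ListGol([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
        >>> gol.advance()
        >>> gol.get_matrix()[1]
        [1, 1, 1]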
"""
def __init__(self, board):
self._board = board
self.height = len(board)
self.width = len(board[0]) if board else 0
def advance(self):
"""Advance the board to the next generation"""
# most of the board will be empty, so efficiently initialize to that
next_board = [[0] * self.width for _ in range(self.height)]
for w in range(self.width):
for h in range(self.height):
neighbours = self._neighbours(h, w)
if neighbours == 3:
next_board[h][w] = 1
elif neighbours == 2:
next_board[h][w] = self._board[h][w]
self._board = next_board
def _neighbours(self, h, w):
if h == 0:
h_indizes = (0, 1)
elif h == self.height - 1:
h_indizes = (h - 1, h)
else:
h_indizes = (h - 1, h, h + 1)
if w == 0:
w_indizes = (0, 1)
elif w == self.width - 1:
w_indizes = (w - 1, w)
else:
w_indizes = (w - 1, w, w + 1)
return sum(
self._board[i][j]
for i in h_indizes
for j in w_indizes
if i != h or j != w
)
def __getitem__(self, item):
return self._board[item]
def __iter__(self):
yield from self._board
def get_matrix(self):
"""Return the game board as a nested list"""
return [line[:] for line in self._board]
GOL = ListGol
| mit | -6,665,646,626,962,620,000 | 29.016667 | 76 | 0.49528 | false |
ostrokach/biskit | Biskit/ColorSpectrum.py | 1 | 10642 | ## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
## last $Author$
## last $Date$
## $Revision$
##
## Contributions:
## mostly copied over from the MatrixPlot class of Wolfgang Rieping
"""
Create color scales.
"""
import numpy.oldnumeric as N
from Errors import BiskitError
class ColorError( BiskitError ):
pass
class ColorSpectrum:
"""
Translate a range of numeric values into a range of color codes.
Example::
>>> p = ColorSpectrum( 'grey', 1, 500 )
>>> single_color= p.color( 250 )
>>> color_range = p.colors( range(25,250), resetLimits=0 )
Available palettes are:
* grey
* plasma
* plasma2 (default)
* sausage (seems to have a discontinuity at 50%, see example below)
"""
MAX_COL = {'grey': 3 * 255,
'plasma': 3 * 255,
'plasma2': 3 * 255,
'sausage': 2 * 255}
def __init__(self, palette="plasma2", vmin=0., vmax=1., default=0xffffff ):
"""
Create a new palette of given type and value range.
@param palette: palette type (grey, sausage, plasma, plasma2)
(default: plasma2)
@type palette: str
@param vmin: smallest value covered by the color range (default: 0.)
@type vmin: float
@param vmax: largest value covered by the color range (default: 1.)
@type vmax: float
@param default: default color for values below the palette range
@type default: int (color code, default=0xffff)
@raise ColorError: if palette unknown
"""
try:
self.col_func = eval( 'self._ColorSpectrum__' + palette )
self.vmin = vmin * 1.0
self.vmax = vmax * 1.0
self.col_max = self.MAX_COL[ palette ]
self.default_color = default
except AttributeError:
raise ColorError, 'Unknown palette: ' + str(palette)
except IndexError, why:
raise ColorError, 'Undefined palette: ' + str(why)
def __make_col(self, red, green, blue):
"""
Create color.
@param red: rgb color, 0-255
@type red: int
@param green: rgb color, 0-255
@type green: int
@param blue: rgb color, 0-255
@type blue: int
@return: color
@rtype: int
"""
return ((red << 16) + (green << 8) + blue)
def __normalize( self, value ):
"""
Normalize values
@param value: normalization value
@type value: float
@return: normalized color
@rtype: int
"""
if self.vmax == self.vmin:
return self.col_max
return (value - self.vmin) / ( self.vmax - self.vmin ) * self.col_max
def color(self, value ):
"""
Translate a single value into a color.
@param value: value to be translated into color
@type value: float
@return: color code for value
@rtype: int
"""
if value < self.vmin:
return self.default_color
r = self.__make_col( *self.col_func(self.__normalize(value)) )
return r
def colors( self, values, resetLimits=1 ):
"""
Translate a list of values into a list of colors.
@param values: values to be translated into colors
@type values: [float]
@param resetLimits: re-define color range on max and min of values
(default: 1)
@type resetLimits: 1|0
@return: color codes
@rtype: [int]
"""
if resetLimits:
self.vmax = max( values ) * 1.
self.vmin = min( values ) * 1.
return [ self.color(v) for v in values ]
def color_array( self, a, resetLimits=1 ):
"""
@param a: array of float
@type a: array of float
@param resetLimits: re-define color range on max and min of values
(default: 1)
@type resetLimits: 1|0
@return: matrix of color codes with same dimensions as a
@rtype: array of float
"""
s = N.shape( a )
v = N.ravel( a )
r = self.colors( v, resetLimits=resetLimits )
r = N.reshape( r, s )
return r
def legend( self ):
"""
        @return: list of (value, color) pairs covering the palette range
        @rtype: [ (float, int) ]
"""
r = []
step = (self.vmax - self.vmin) / self.col_max
for i in range( self.col_max ):
v = i*step + self.vmin
c = self.color( v )
r.append( (v,c) )
return r
def __map(self, x):
return int(min(255, round(x)))
##
## Available color palettes
##
def __grey(self, x):
x = self.__map(255 * x / self.col_max)
return x, x, x
def __plasma2(self, x):
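        # Piecewise ramp over the normalized value: blue fades into green up to
        # 255, green warms to yellow up to 510, then yellow shifts to red.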
blue_range = 150
red_range = 255
green_range = 255
if x <= blue_range:
red = 0
green = self.__map(x)
blue = self.__map(blue_range - x)
elif x <= 255:
red = 0
blue = 0
green = self.__map(x)
elif x > 255 + green_range:
x -= 255 + green_range
blue = 0
red = 255
green = self.__map(255 - x)
else:
x -= 255
blue = 0
red = self.__map(x)
green = 255
return red, green, blue
def __plasma(self, x):
blue_range = 255
red_range = 255
green_range = 255
if x <= blue_range:
red = 0
green = self.__map(x)
blue = self.__map(blue_range - x)
elif x > 255 + green_range:
x -= 255 + green_range
blue = 0
red = 255
green = self.__map(255 - x)
else:
x -= 255
blue = 0
red = self.__map(x)
green = 255
return red, green, blue
def __sausage(self, x):
first_half = 255
if x <= first_half:
red = self.__map(x)
green = 0
blue = self.__map(first_half - x)
else:
x -= 255
red = 255
green = self.__map(x)
blue = int(0.3 * 255)
return red, green, blue
def colorRange( nColors, palette='plasma2' ):
"""Quick access to a range of colors.
@param nColors: number of colors needed
@type nColors: int
@param palette: type of color spectrum
@type palette: str
@return: a range of color values
@rtype: [ int ]
"""
c = ColorSpectrum( palette=palette, vmin=0, vmax=1. )
r = 1. * N.arange( 0, nColors ) / nColors
return c.colors( r )
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
"""ColorSpectrum test"""
def test_ColorSpectrum( self ):
"""ColorSpectrum test"""
import biggles as B
c_grey = ColorSpectrum( 'grey', 0, 100 )
c_sausage = ColorSpectrum( 'sausage', 0, 100 )
c_plasma = ColorSpectrum( 'plasma', 0, 100 )
c_plasma2 = ColorSpectrum( 'plasma2', 0, 100 )
self.p = B.FramedPlot()
## old_spectrum = tools.colorSpectrum( 100 )
self.result = []
for i in range( -1, 100 ):
x = (i, i+1 )
self.p.add( B.FillBelow( x, (1., 1.),
color = c_grey.color( i ) ) )
self.result += [ c_grey.color( i ) ]
self.p.add( B.FillBelow( x, (0.75, 0.75),
color = c_sausage.color( i ) ) )
self.p.add( B.FillBelow( x, (0.5, 0.5),
color = c_plasma.color( i ) ) )
self.p.add( B.FillBelow( x, (0.25, 0.25),
color = c_plasma2.color( i ) ) )
## self.p.add( B.FillBelow( x, (0., 0.),
## color = old_spectrum[i] ))
self.p.add( B.Curve( (0,100), (1.,1.)) )
self.p.add( B.Curve( (0,100), (.75,.75)) )
self.p.add( B.Curve( (0,100), (.5,.5) ))
self.p.add( B.Curve( (0,100), (0.25, 0.25)) )
self.p.add( B.Curve( (0,100), (0.0, 0.0)) )
self.p.add( B.PlotLabel( 0.5 ,0.9, 'grey') )
self.p.add( B.PlotLabel( 0.5 ,0.65, 'sausage') )
self.p.add( B.PlotLabel( 0.5 ,0.4, 'plasma') )
self.p.add( B.PlotLabel( 0.5 ,0.15, 'plasma2') )
if self.local or self.VERBOSITY > 2:
self.p.show()
self.assertEqual(self.result, self.EXPECTED)
EXPECTED = [16777215, 0, 197379, 328965, 526344, 657930, 855309, 986895, 1184274, 1315860, 1513239, 1710618, 1842204, 2039583, 2171169, 2368548, 2500134, 2697513, 2829099, 3026478, 3158064, 3355443, 3552822, 3684408, 3881787, 4013373, 4210752, 4342338, 4539717, 4671303, 4868682, 5066061, 5197647, 5395026, 5526612, 5723991, 5855577, 6052956, 6184542, 6381921, 6513507, 6710886, 6908265, 7039851, 7237230, 7368816, 7566195, 7697781, 7895160, 8026746, 8224125, 8421504, 8553090, 8750469, 8882055, 9079434, 9211020, 9408399, 9539985, 9737364, 9868950, 10066329, 10263708, 10395294, 10592673, 10724259, 10921638, 11053224, 11250603, 11382189, 11579568, 11776947, 11908533, 12105912, 12237498, 12434877, 12566463, 12763842, 12895428, 13092807, 13224393, 13421772, 13619151, 13750737, 13948116, 14079702, 14277081, 14408667, 14606046, 14737632, 14935011, 15132390, 15263976, 15461355, 15592941, 15790320, 15921906, 16119285, 16250871, 16448250, 16579836]
if __name__ == '__main__':
BT.localTest()
| gpl-3.0 | -4,445,687,505,259,121,000 | 28.479224 | 953 | 0.53317 | false |
trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9-SunOS-i386/lib/python/lib/python2.4/shelve.py | 11 | 7982 | """Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = d.has_key(key) # true if the key exists; same as "key in d"
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
# Try using cPickle and cStringIO if available.
try:
from cPickle import Pickler, Unpickler
except ImportError:
from pickle import Pickler, Unpickler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import UserDict
import warnings
__all__ = ["Shelf","BsdDbShelf","DbfilenameShelf","open"]
class Shelf(UserDict.DictMixin):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False, binary=None):
self.dict = dict
if protocol is not None and binary is not None:
raise ValueError, "can't specify both 'protocol' and 'binary'"
if binary is not None:
warnings.warn("The 'binary' argument to Shelf() is deprecated",
PendingDeprecationWarning)
protocol = int(binary)
if protocol is None:
protocol = 0
self._protocol = protocol
self.writeback = writeback
self.cache = {}
def keys(self):
return self.dict.keys()
def __len__(self):
return len(self.dict)
def has_key(self, key):
return self.dict.has_key(key)
def __contains__(self, key):
return self.dict.has_key(key)
def get(self, key, default=None):
if self.dict.has_key(key):
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = StringIO(self.dict[key])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = StringIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key] = f.getvalue()
def __delitem__(self, key):
del self.dict[key]
try:
del self.cache[key]
except KeyError:
pass
def close(self):
self.sync()
try:
self.dict.close()
except AttributeError:
pass
self.dict = 0
def __del__(self):
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.iteritems():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False, binary=None):
Shelf.__init__(self, dict, protocol, writeback, binary)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = StringIO(value)
return (key, Unpickler(f).load())
def next(self):
(key, value) = self.dict.next()
f = StringIO(value)
return (key, Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = StringIO(value)
return (key, Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = StringIO(value)
return (key, Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = StringIO(value)
return (key, Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "anydbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False, binary=None):
import anydbm
Shelf.__init__(self, anydbm.open(filename, flag), protocol, writeback, binary)
def open(filename, flag='c', protocol=None, writeback=False, binary=None):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
anydbm.open(). The optional protocol parameter specifies the
version of the pickle protocol (0, 1, or 2).
The optional binary parameter is deprecated and may be set to True
to force the use of binary pickles for serializing data values.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback, binary)
| gpl-2.0 | 8,327,687,535,641,170,000 | 33.554113 | 88 | 0.639815 | false |
openpermissions/auth-srv | auth/oauth2/scope.py | 1 | 9035 | # -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Scopes
------
The following scopes are available:
- read
- write
- delegate
The "read" scope permits the client to read a protected resource.
This is the default scope.
The "write" scope permits a client to write to a protected resource.
Unlike "read", "write" must also include the identity of the
resource, within "[]" brackets, e.g. "write[1234]" allows writing to a
resource identified by 1234. Services may also be identified by their registered
URL, e.g. "write[http://test.com]". The auth service verifies whether the
client is permitted to write to the resource before issuing an access
token.
The "delegate" scope is used to delegate writing to a resource, e.g.
the onboarding service accesses the repository on the client's behalf.
To access the resource, the delegate will exchange the token for a new
"write" token.
The delegate scope has the form
delegate[<service id or url>]:write[<resource id or url>]
Where service id or url is the delegate service's ID or URL (e.g. the
onboarding service URL), and the resource ID or URL is the protected
resource's ID or URL (e.g. the repository ID).
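For example (all identifiers below are illustrative):
    delegate[https://onboarding.example.com]:write[8d2e9ba76ae9400a]
allows the onboarding service registered at that URL to write to the
repository "8d2e9ba76ae9400a" on the client's behalf.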
The advantage of using "delegate" instead of "write" is that the token can
only be used by the specified delegate (assuming the delegate keeps their
credentials secure), and the delegate will only be able to write to the
specified resource.
"""
import re
from collections import defaultdict, namedtuple
from functools import partial
import couch
from perch import views, Repository, Service
from tornado.gen import coroutine, Return
from .exceptions import InvalidScope, Unauthorized
READ = 'read'
READ_REGEX = re.compile(r'^read\[(?P<resource_id>.+)\]$')
WRITE = 'write'
WRITE_REGEX = re.compile(r'^write\[(?P<resource_id>.+)\]$')
DELEGATE = 'delegate'
DELEGATE_REGEX = re.compile(r'^delegate\[(?P<delegate_id>.+)\]:(?P<delegated_action>read|write)\[(?P<resource_id>.+)\]$')
ACCESS_MAPPING = {
READ: 'r',
WRITE: 'w'
}
RESOURCE_TYPES = {
Repository.resource_type: Repository,
Service.resource_type: Service,
}
Access = namedtuple('Access', ['access', 'delegate_id'])
class Scope(object):
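    # Parses a space-separated scope string into self.resources / self.delegates,
    # e.g. (identifiers illustrative):
    #   "read write[1234] delegate[https://onboarding.example.com]:write[1234]"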
def __init__(self, scope):
self.scope = scope
# read is True if the scope is for reading any resource
self.read = False
try:
self._group()
except KeyError:
raise InvalidScope('Invalid action')
def __str__(self):
return self.scope
def __repr__(self):
return '<Scope: {}>'.format(self.scope)
def _group(self):
"""
Group scope string by actions and resources
        Raises InvalidScope if the scope is invalid
"""
self.resources = defaultdict(set)
self.delegates = defaultdict(set)
for x in self.scope.split():
if x.startswith(READ):
self._add_read(x)
elif x.startswith(WRITE):
self._add_write(x)
elif x.startswith(DELEGATE):
self._add_delegate(x)
else:
raise InvalidScope('Scope has missing elements')
def _add_read(self, scope):
"""Add 'read' scope to self.resources"""
access = ACCESS_MAPPING[READ]
matched = re.match(READ_REGEX, scope)
if not matched:
self.read = True
else:
resource_id = matched.group('resource_id')
self.resources[resource_id].add(Access(access, None))
def _add_write(self, scope):
"""Add 'write' scope to self.resources"""
access = ACCESS_MAPPING[WRITE]
matched = re.match(WRITE_REGEX, scope)
if not matched:
raise InvalidScope('Write scope requires a resource ID')
resource_id = matched.group('resource_id')
self.resources[resource_id].add(Access(access, None))
def _add_delegate(self, scope):
"""Add 'delegate' scope to self.delegates & self.resources"""
matched = re.match(DELEGATE_REGEX, scope)
if not matched:
raise InvalidScope('Invalid delegate scope')
resource_id = matched.group('resource_id')
delegate_id = matched.group('delegate_id')
access = ACCESS_MAPPING[matched.group('delegated_action')]
self.delegates[delegate_id].add(Access(access, None))
self.resources[resource_id].add(Access(access, delegate_id))
def within_scope(self, access, resource_id):
"""Is accessing the resource within this scope"""
if access in ('r', 'rw') and self.read is True:
return True
access_set = {Access(x, None) for x in access if x in 'rw'}
return bool(access_set & (self.resources[resource_id] | self.delegates[resource_id]))
@coroutine
def validate(self, client):
"""
Validate the requested OAuth2 scope
If a "write" or "delegate" scope is requested then also checks access
to the resource and delegate
        :param client: the client object, used to check that the client is
            authorized for the requested scope
:raise:
InvalidScope: The scope is invalid
Unauthorized: The client is not authorized for the scope
"""
resource_func = partial(self._check_access_resource, client)
delegate_func = partial(self._check_access_delegate, client)
yield [self._check_access_resources(resource_func, self.resources),
self._check_access_resources(delegate_func, self.delegates)]
@coroutine
def _check_access_resources(self, func, resources):
"""Check resources exist and then call func for each resource"""
grouped = {'ids': {}, 'urls': {}}
for k, v in resources.items():
if k.startswith('http'):
grouped['urls'][k] = v
else:
grouped['ids'][k] = v
yield [self._check_access_resource_ids(func, grouped['ids']),
self._check_access_resource_urls(func, grouped['urls'])]
@coroutine
def _check_access_resource_ids(self, func, resources):
"""
Check resource identified by an ID exist and then call func for
each resource
"""
if not resources:
raise Return()
for resource_id in resources:
try:
doc = yield views.service_and_repository.first(key=resource_id)
except couch.NotFound:
raise InvalidScope('Scope contains an unknown resource ID')
resource = RESOURCE_TYPES[doc['value']['type']](**doc['value'])
try:
yield resource.get_parent()
except couch.NotFound:
raise InvalidScope('Invalid resource - missing parent')
func(resource, resources[resource_id])
@coroutine
def _check_access_resource_urls(self, func, resources):
"""
Check resource identified by an URL exist and then call func for each
resource
"""
for url in resources:
try:
resource = yield Service.get_by_location(url)
except couch.NotFound:
raise InvalidScope("Scope contains an unknown location: '{}'"
.format(url))
func(resource, resources[url])
def _concatenate_access(self, access):
"""Concatenate a resource's access"""
return ''.join(sorted(list({x.access for x in access})))
def _check_access_resource(self, client, resource, access):
"""Check the client has access to the resource"""
requested_access = self._concatenate_access(access)
has_access = client.authorized(requested_access, resource)
if not has_access:
raise Unauthorized(
"Client '{}' does not have '{}' access to '{}'"
.format(client.id, requested_access, resource.id))
def _check_access_delegate(self, client, delegate, access):
"""Check delegate is the correct type and check access"""
if delegate.type != Service.resource_type:
raise InvalidScope("Only services can be delegates. '{}' is a '{}'"
.format(delegate.id, delegate.type))
self._check_access_resource(client, delegate, access)
| apache-2.0 | -4,525,683,355,783,926,300 | 34.996016 | 121 | 0.633868 | false |
stripe/stripe-datadog-checks | checks.d/sequins.py | 2 | 3341 | import requests
from checks import AgentCheck
class Sequins(AgentCheck):
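    # Reads the JSON status document exposed by a Sequins server and emits
    # gauges per db/version/node. Illustrative instance config:
    #   - url: http://localhost:9599/status
    #     max_dbs: 200
    #     tags: ['cluster:test']
    # 'url' is required; 'max_dbs' and 'tags' are optional.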
DEFAULT_TIMEOUT = 1
DEFAULT_MAX_DBS = 100
CONNECT_CHECK_NAME = 'sequins.can_connect'
def check(self, instance):
if 'url' not in instance:
self.log.info("Skipping instance, no url found.")
return
instance_tags = instance.get('tags', [])
max_dbs = instance.get('max_dbs', self.DEFAULT_MAX_DBS)
timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)
resp = self.get_json(instance['url'], timeout)
self.gauge('sequins.db_count', len(resp['dbs']), tags=instance_tags)
for db_name, db in resp['dbs'].iteritems():
db_tags = instance_tags + ['sequins_db:%s' % db_name]
num_dbs = len(db['versions'])
if num_dbs > max_dbs:
raise Exception("%d dbs is more than the configured maximum (%d)" % (num_dbs, max_dbs))
self.gauge('sequins.version_count', num_dbs, db_tags)
for version_name, version in db['versions'].iteritems():
version_tags = db_tags + ['sequins_version:%s' % version_name]
self.gauge('sequins.partition_count', version['num_partitions'], version_tags)
self.gauge('sequins.missing_partition_count', version['missing_partitions'], version_tags)
self.gauge('sequins.underreplicated_partition_count', version['underreplicated_partitions'], version_tags)
self.gauge('sequins.overreplicated_partition_count', version['overreplicated_partitions'], version_tags)
self.gauge('sequins.average_replication', version['average_replication'], version_tags)
node_counts = {}
for node in version['nodes'].itervalues():
st = node['state']
node_counts[st] = node_counts.get(st, 0) + 1
for state, count in node_counts.iteritems():
tags = version_tags + ['sequins_node_state:%s' % state.lower()]
self.gauge('sequins.node_count', count, tags)
if 'shard_id' in resp:
self.gauge('sequins.shard_id', 1, instance_tags + ['sequins_shard:%s' % resp['shard_id']])
def get_json(self, url, timeout):
try:
r = requests.get(url, timeout=timeout, headers={'accept': 'application/json'})
r.raise_for_status()
except requests.exceptions.Timeout:
# If there's a timeout
self.service_check(
self.CONNECT_CHECK_NAME, AgentCheck.CRITICAL,
message='%s timed out after %s seconds.' % (url, timeout),
tags=["url:{0}".format(url)]
)
raise Exception("Timeout when hitting %s" % url)
except requests.exceptions.HTTPError:
self.service_check(
self.CONNECT_CHECK_NAME, AgentCheck.CRITICAL,
message='%s returned a status of %s' % (url, r.status_code),
tags=["url:{0}".format(url)]
)
raise Exception("Got %s when hitting %s" % (r.status_code, url))
else:
self.service_check(
self.CONNECT_CHECK_NAME, AgentCheck.OK,
tags=["url:{0}".format(url)]
)
return r.json()
| mit | -54,815,468,666,939,336 | 41.833333 | 122 | 0.567794 | false |
MartialD/hyperspy | hyperspy/tests/io/test_dm_stackbuilder_plugin.py | 4 | 2535 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
from hyperspy.io import load
from numpy.testing import assert_allclose
my_path = os.path.dirname(__file__)
class TestStackBuilder:
def test_load_stackbuilder_imagestack(self):
image_stack = load(os.path.join(my_path, "dm_stackbuilder_plugin",
"test_stackbuilder_imagestack.dm3"))
data_dimensions = image_stack.data.ndim
am = image_stack.axes_manager
axes_dimensions = am.signal_dimension + am.navigation_dimension
assert data_dimensions == axes_dimensions
md = image_stack.metadata
assert md.Acquisition_instrument.TEM.acquisition_mode == "STEM"
assert_allclose(md.Acquisition_instrument.TEM.beam_current, 0.0)
assert_allclose(md.Acquisition_instrument.TEM.beam_energy, 200.0)
assert_allclose(md.Acquisition_instrument.TEM.camera_length, 15.0)
assert_allclose(
md.Acquisition_instrument.TEM.dwell_time, 0.03000005078125)
assert_allclose(md.Acquisition_instrument.TEM.magnification, 200000.0)
assert md.Acquisition_instrument.TEM.microscope == "JEM-ARM200F"
assert md.General.date == "2015-05-17"
assert md.General.original_filename == "test_stackbuilder_imagestack.dm3"
assert md.General.title == "stackbuilder_test4_16x2"
assert md.General.time == "17:00:16"
assert md.Sample.description == "DWNC"
assert md.Signal.quantity == "Electrons (Counts)"
assert md.Signal.signal_type == ""
assert md.Signal.binned == False
assert_allclose(md.Signal.Noise_properties.Variance_linear_model.gain_factor,
0.15674974)
assert_allclose(md.Signal.Noise_properties.Variance_linear_model.gain_offset,
2228741.5)
| gpl-3.0 | 6,849,309,042,125,993,000 | 42.706897 | 85 | 0.687968 | false |
AlanZatarain/visvis.dev | freezeHelp.py | 5 | 2890 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module freezeHelp
Helps freezing apps made using visvis.
"""
import visvis as vv
import os, shutil, sys
def copyResources(destPath):
""" copyResources(destinationPath)
Copy the visvis resource dir to the specified folder
(The folder containing the frozen executable).
"""
# create folder (if required)
destPath = os.path.join(destPath, 'visvisResources')
if not os.path.isdir(destPath):
os.makedirs(destPath)
# copy files
path = vv.misc.getResourceDir()
for file in os.listdir(path):
if file.startswith('.') or file.startswith('_'):
continue
shutil.copy(os.path.join(path,file), os.path.join(destPath,file))
def getIncludes(backendName):
""" getIncludes(backendName)
Get a list of includes to extend the 'includes' list
    of py2exe or bbfreeze with. The list contains:
* the module of the specified backend
    * all the function names, which are dynamically loaded and therefore
not included by default.
* opengl stuff
"""
# init
includes = []
# backend
backendModule = 'visvis.backends.backend_'+ backendName
includes.append(backendModule)
if backendName == 'qt4':
includes.extend(["sip", "PyQt4.QtCore", "PyQt4.QtGui"])
# functions
for funcName in vv.functions._functionNames:
includes.append('visvis.functions.'+funcName)
# processing functions
for funcName in vv.processing._functionNames:
includes.append('visvis.processing.'+funcName)
# opengl stuff
arrayModules = ["nones", "strings","lists","numbers","ctypesarrays",
"ctypesparameters", "ctypespointers", "numpymodule",
"formathandler"]
GLUModules = ["glustruct"]
for name in arrayModules:
name = 'OpenGL.arrays.'+name
if name in sys.modules:
includes.append(name)
for name in GLUModules:
name = 'OpenGL.GLU.'+name
if name in sys.modules:
includes.append(name)
if sys.platform.startswith('win'):
includes.append("OpenGL.platform.win32")
# done
return includes
def getExcludes(backendName):
""" getExcludes(backendName)
Get a list of excludes. If using the 'wx' backend, you don't
want all the qt4 libaries.
backendName is the name of the backend which you do want to use.
"""
# init
excludes = []
# Neglect qt4
if 'qt4' != backendName:
excludes.extend(["sip", "PyQt4", "PyQt4.QtCore", "PyQt4.QtGui"])
# Neglect wx
if 'wx' != backendName:
excludes.extend(["wx"])
# done
return excludes
| bsd-3-clause | -5,252,314,897,518,609,000 | 26.52381 | 73 | 0.626298 | false |
abdoosh00/edx-rtl-final | lms/djangoapps/django_comment_client/base/tests.py | 3 | 16649 | import logging
from django.test.utils import override_settings
from django.test.client import Client
from django.contrib.auth.models import User
from student.tests.factories import CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from django.core.urlresolvers import reverse
from django.core.management import call_command
from util.testing import UrlResetMixin
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from nose.tools import assert_true, assert_equal # pylint: disable=E0611
from mock import patch, ANY
log = logging.getLogger(__name__)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(UrlResetMixin, ModuleStoreTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp()
# create a course
self.course = CourseFactory.create(org='MITx', course='999',
display_name='Robot Super Course')
self.course_id = self.course.id
# seed the forums permissions and roles
call_command('seed_permissions_roles', self.course_id)
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = '[email protected]'
password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, password)
self.student.is_active = True
self.student.save()
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
self.client = Client()
assert_true(self.client.login(username='student', password='test'))
def test_create_thread(self, mock_request):
mock_request.return_value.status_code = 200
mock_request.return_value.text = u'{"title":"Hello",\
"body":"this is a post",\
"course_id":"MITx/999/Robot_Super_Course",\
"anonymous":false,\
"anonymous_to_peers":false,\
"commentable_id":"i4x-MITx-999-course-Robot_Super_Course",\
"created_at":"2013-05-10T18:53:43Z",\
"updated_at":"2013-05-10T18:53:43Z",\
"at_position_list":[],\
"closed":false,\
"id":"518d4237b023791dca00000d",\
"user_id":"1","username":"robot",\
"votes":{"count":0,"up_count":0,\
"down_count":0,"point":0},\
"abuse_flaggers":[],"tags":[],\
"type":"thread","group_id":null,\
"pinned":false,\
"endorsed":false,\
"unread_comments_count":0,\
"read":false,"comments_count":0}'
thread = {"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"]
}
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': self.course_id})
response = self.client.post(url, data=thread)
assert_true(mock_request.called)
mock_request.assert_called_with(
'post',
'http://localhost:4567/api/v1/i4x-MITx-999-course-Robot_Super_Course/threads',
data={
'body': u'this is a post',
'anonymous_to_peers': False, 'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False, 'course_id': u'MITx/999/Robot_Super_Course',
},
params={'request_id': ANY},
headers={'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
timeout=5
)
assert_equal(response.status_code, 200)
def test_flag_thread(self, mock_request):
mock_request.return_value.status_code = 200
mock_request.return_value.text = u'{"title":"Hello",\
"body":"this is a post",\
"course_id":"MITx/999/Robot_Super_Course",\
"anonymous":false,\
"anonymous_to_peers":false,\
"commentable_id":"i4x-MITx-999-course-Robot_Super_Course",\
"created_at":"2013-05-10T18:53:43Z",\
"updated_at":"2013-05-10T18:53:43Z",\
"at_position_list":[],\
"closed":false,\
"id":"518d4237b023791dca00000d",\
"user_id":"1","username":"robot",\
"votes":{"count":0,"up_count":0,\
"down_count":0,"point":0},\
"abuse_flaggers":[1],"tags":[],\
"type":"thread","group_id":null,\
"pinned":false,\
"endorsed":false,\
"unread_comments_count":0,\
"read":false,"comments_count":0}'
url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', 'http://localhost:4567/api/v1/threads/518d4237b023791dca00000d'),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('put', 'http://localhost:4567/api/v1/threads/518d4237b023791dca00000d/abuse_flag'),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('get', 'http://localhost:4567/api/v1/threads/518d4237b023791dca00000d'),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_thread(self, mock_request):
mock_request.return_value.status_code = 200
mock_request.return_value.text = u'{"title":"Hello",\
"body":"this is a post",\
"course_id":"MITx/999/Robot_Super_Course",\
"anonymous":false,\
"anonymous_to_peers":false,\
"commentable_id":"i4x-MITx-999-course-Robot_Super_Course",\
"created_at":"2013-05-10T18:53:43Z",\
"updated_at":"2013-05-10T18:53:43Z",\
"at_position_list":[],\
"closed":false,\
"id":"518d4237b023791dca00000d",\
"user_id":"1","username":"robot",\
"votes":{"count":0,"up_count":0,\
"down_count":0,"point":0},\
"abuse_flaggers":[],"tags":[],\
"type":"thread","group_id":null,\
"pinned":false,\
"endorsed":false,\
"unread_comments_count":0,\
"read":false,"comments_count":0}'
url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', 'http://localhost:4567/api/v1/threads/518d4237b023791dca00000d'),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('put', 'http://localhost:4567/api/v1/threads/518d4237b023791dca00000d/abuse_unflag'),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('get', 'http://localhost:4567/api/v1/threads/518d4237b023791dca00000d'),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_flag_comment(self, mock_request):
mock_request.return_value.status_code = 200
mock_request.return_value.text = u'{"body":"this is a comment",\
"course_id":"MITx/999/Robot_Super_Course",\
"anonymous":false,\
"anonymous_to_peers":false,\
"commentable_id":"i4x-MITx-999-course-Robot_Super_Course",\
"created_at":"2013-05-10T18:53:43Z",\
"updated_at":"2013-05-10T18:53:43Z",\
"at_position_list":[],\
"closed":false,\
"id":"518d4237b023791dca00000d",\
"user_id":"1","username":"robot",\
"votes":{"count":0,"up_count":0,\
"down_count":0,"point":0},\
"abuse_flaggers":[1],\
"type":"comment",\
"endorsed":false}'
url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', 'http://localhost:4567/api/v1/comments/518d4237b023791dca00000d'),
{
'data': None,
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('put', 'http://localhost:4567/api/v1/comments/518d4237b023791dca00000d/abuse_flag'),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('get', 'http://localhost:4567/api/v1/comments/518d4237b023791dca00000d'),
{
'data': None,
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_comment(self, mock_request):
mock_request.return_value.status_code = 200
mock_request.return_value.text = u'{"body":"this is a comment",\
"course_id":"MITx/999/Robot_Super_Course",\
"anonymous":false,\
"anonymous_to_peers":false,\
"commentable_id":"i4x-MITx-999-course-Robot_Super_Course",\
"created_at":"2013-05-10T18:53:43Z",\
"updated_at":"2013-05-10T18:53:43Z",\
"at_position_list":[],\
"closed":false,\
"id":"518d4237b023791dca00000d",\
"user_id":"1","username":"robot",\
"votes":{"count":0,"up_count":0,\
"down_count":0,"point":0},\
"abuse_flaggers":[],\
"type":"comment",\
"endorsed":false}'
url = reverse('un_flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', 'http://localhost:4567/api/v1/comments/518d4237b023791dca00000d'),
{
'data': None,
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('put', 'http://localhost:4567/api/v1/comments/518d4237b023791dca00000d/abuse_unflag'),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
),
(
('get', 'http://localhost:4567/api/v1/comments/518d4237b023791dca00000d'),
{
'data': None,
'params': {'request_id': ANY},
'headers': {'X-Edx-Api-Key': 'PUT_YOUR_API_KEY_HERE'},
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
| agpl-3.0 | -5,509,927,326,649,776,000 | 48.550595 | 130 | 0.418644 | false |
garnertb/firecares | firecares/celery.py | 1 | 3399 |
from __future__ import absolute_import
import boto
import os
import mimetypes
import requests
from celery import Celery
from django.conf import settings
from firecares.utils.s3put import singlepart_upload
from firecares.utils import convert_png_to_jpg
from celery.task import current
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'firecares.settings.local')
app = Celery('firecares')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
def download_file(url, download_to=None):
if not download_to:
download_to = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
    with open(download_to, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return download_to
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
@app.task(rate_limit=10)
def cache_thumbnail(id, upload_to_s3=False, marker=True):
try:
import shutil
print settings.MAPBOX_ACCESS_TOKEN
from firecares.firestation.models import FireDepartment
department = FireDepartment.objects.get(id=id)
filename = department.thumbnail_name
generate_thumbnail = department.generate_thumbnail(marker=marker)
if not marker:
filename = department.thumbnail_name_no_marker
full_filename = os.path.join('/home/firecares/department-thumbnails', filename)
if not generate_thumbnail.startswith('/static'):
f = download_file(generate_thumbnail, full_filename.replace('jpg', 'png'))
full_filename = convert_png_to_jpg(f)
else:
shutil.copy('/webapps/firecares/firecares/firecares/firestation/static/firestation/theme/assets/images/content/property-1.jpg', full_filename)
if upload_to_s3:
c = boto.s3.connect_to_region('us-east-1',
aws_access_key_id=getattr(settings, 'AWS_ACCESS_KEY_ID', None),
aws_secret_access_key=getattr(settings, 'AWS_SECRET_ACCESS_KEY', None),
is_secure=True,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
debug=2
)
b = c.get_bucket('firecares-static/department-thumbnails', validate=False)
mtype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
headers = {'Content-Type': mtype, 'Cache-Control': 'max-age=%d, public' % (3600 * 24)}
singlepart_upload(b,
key_name=filename,
fullpath=full_filename,
policy='public-read',
reduced_redundancy=False,
headers=headers)
except Exception as exc:
if current.request.retries < 3:
current.retry(exc=exc, countdown=min(2 ** current.request.retries, 128))
| mit | 994,849,652,240,190,200 | 38.068966 | 154 | 0.610179 | false |
non-official-SD/base | src/tools/blender/blender-2.6/osg/osgdata.py | 3 | 69729 | # -*- python-indent: 4; mode: python -*-
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2008-2012 Cedric Pinson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Cedric Pinson <[email protected]>
# Jeremy Moles <[email protected]>
import bpy
import mathutils
from mathutils import *
import bpy
import sys
import math
import os
import shutil
import subprocess
from sys import exit
import osg
from . import osglog
from . import osgconf
from .osgconf import DEBUG
from .osgconf import debug
from . import osgbake
from . import osgobject
from .osgobject import *
osgobject.VERSION = osg.__version__
Vector = mathutils.Vector
Quaternion = mathutils.Quaternion
Matrix = mathutils.Matrix
Euler = mathutils.Euler
def createImageFilename(texturePath, image):
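    # e.g. an image stored at "//textures/rock.png" with file_format 'PNG' and
    # texturePath "textures" resolves to "textures/rock.png" (illustrative values)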
fn = bpy.path.basename(bpy.path.display_name_from_filepath(image.filepath))
i = fn.rfind(".")
if i != -1:
name = fn[0:i]
else:
name = fn
# [BMP, IRIS, PNG, JPEG, TARGA, TARGA_RAW, AVI_JPEG, AVI_RAW, FRAMESERVER]
#print("format " + image.file_format)
if image.file_format == 'PNG':
ext = "png"
elif image.file_format == 'HDR':
ext = "hdr"
elif image.file_format == 'JPEG':
ext = "jpg"
elif image.file_format == 'TARGA' or image.file_format == 'TARGA_RAW':
ext = "tga"
elif image.file_format == 'BMP':
ext = "bmp"
elif image.file_format == 'AVI_JPEG' or image.file_format == 'AVI_RAW':
ext = "avi"
else:
ext = "unknown"
    name = name + "." + ext
print("create Image Filename " + name)
if texturePath != "" and not texturePath.endswith("/"):
texturePath = texturePath + "/"
return texturePath + name
def getImageFilesFromStateSet(stateset):
list = []
#if DEBUG: osglog.log("stateset %s" % str(stateset))
if stateset is not None and len(stateset.texture_attributes) > 0:
for unit, attributes in stateset.texture_attributes.items():
for a in attributes:
if a.className() == "Texture2D":
list.append(a.source_image)
return list
def getRootBonesList(armature):
bones = []
for bone in armature.bones:
if bone.parent == None:
bones.append(bone)
return bones
def getTransform(matrix):
    return (matrix.to_translation(),
            matrix.to_scale(),
            matrix.to_quaternion())
def getDeltaMatrixFrom(parent, child):
if parent is None:
return child.matrix_world
return getDeltaMatrixFromMatrix(parent.matrix_world,
child.matrix_world)
def getDeltaMatrixFromMatrix(parent, child):
p = parent
bi = p.copy()
bi.invert()
return bi*child
def getChildrenOf(scene, object):
children = []
for obj in scene.objects:
if obj.parent == object:
children.append(obj)
return children
def findBoneInHierarchy(scene, bonename):
if scene.name == bonename and (type(scene) == type(Bone()) or type(scene) == type(Skeleton())):
return scene
#print scene.name
if isinstance(scene, Group) is False:
return None
for child in scene.children:
result = findBoneInHierarchy(child, bonename)
if result is not None:
return result
return None
def isActionLinkedToObject(action, objects_name):
action_fcurves = action.fcurves
#log("action ipos " + str(action_ipos_items))
for fcurve in action_fcurves:
#log("is " + str(obj_name) + " in "+ str(objects_name))
path = fcurve.data_path.split("\"")
if objects_name in path:
return True;
return False
def findArmatureObjectForTrack(track):
for o in bpy.data.objects:
if o.type.lower() == "Armature".lower():
if list(o.animation_data.nla_tracks).count(track) > 0:
                return o
return None
#def findObjectForIpo(ipo):
# index = ipo.name.rfind('-')
# if index != -1:
# objname = ipo.name[index+1:]
# try:
# obj = self.config.scene.objects[objname]
# log("bake ipo %s to object %s" % (ipo.name, objname))
# return obj
# except:
# return None
#
# for o in self.config.scene.objects:
# if o.getIpo() == ipo:
# log("bake ipo %s to object %s" % (ipo.name, o.name))
# return o
# return None
#
#def findMaterialForIpo(ipo):
# index = ipo.name.rfind('-')
# if index != -1:
# objname = ipo.name[index+1:]
# try:
# obj = bpy.data.materials[objname]
# log("bake ipo %s to material %s" % (ipo.name, objname))
# return obj
# except:
# return None
#
# for o in bpy.data.materials:
# if o.getIpo() == ipo:
# log("bake ipo %s to material %s" % (ipo.name, o.name))
# return o
# return None
def createAnimationUpdate(obj, callback, rotation_mode, prefix="", zero=False):
has_location_keys = False
has_scale_keys = False
has_rotation_keys = False
if obj.animation_data:
action = obj.animation_data.action
if action:
for curve in action.fcurves:
datapath = curve.data_path[len(prefix):]
osglog.log("curve.data_path " + curve.data_path + " " + str(curve.array_index) + " " + datapath)
if datapath == "location":
has_location_keys = True
if datapath.startswith("rotation"):
has_rotation_keys = True
if datapath == "scale":
has_scale_keys = True
if not (has_location_keys or has_scale_keys or has_rotation_keys) and (len(obj.constraints) == 0):
return None
if zero:
if has_location_keys:
tr = StackedTranslateElement()
tr.translate = Vector()
callback.stacked_transforms.append(tr)
if has_rotation_keys:
if rotation_mode in ["XYZ", "XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]:
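                # one stacked rotation element per axis, appended in reverse of the
                # euler order string (mode "XYZ" appends Z, then Y, then X)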
rotation_keys = [StackedRotateAxisElement(name = "euler_x", axis = Vector((1,0,0)), angle = 0),
StackedRotateAxisElement(name = "euler_y", axis = Vector((0,1,0)), angle = 0),
StackedRotateAxisElement(name = "euler_z", axis = Vector((0,0,1)), angle = 0)]
callback.stacked_transforms.append(rotation_keys[ord(obj.rotation_mode[2]) - ord('X')])
callback.stacked_transforms.append(rotation_keys[ord(obj.rotation_mode[1]) - ord('X')])
callback.stacked_transforms.append(rotation_keys[ord(obj.rotation_mode[0]) - ord('X')])
if rotation_mode == "QUATERNION":
q = StackedQuaternionElement()
q.quaternion = Quaternion()
callback.stacked_transforms.append(q)
if rotation_mode == "AXIS_ANGLE":
callback.stacked_transforms.append(StackedRotateAxisElement(name = "axis_angle",
axis = Vector((1, 0, 0)),
angle = 0))
if has_scale_keys:
sc = StackedScaleElement()
sc.scale = Vector(obj.scale)
callback.stacked_transforms.append(sc)
else:
tr = StackedTranslateElement()
tr.translate = Vector(obj.location)
callback.stacked_transforms.append(tr)
if rotation_mode in ["XYZ", "XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]:
rotation_keys = [StackedRotateAxisElement(name = "euler_x", axis = Vector((1,0,0)), angle = obj.rotation_euler[0]),
StackedRotateAxisElement(name = "euler_y", axis = Vector((0,1,0)), angle = obj.rotation_euler[1]),
StackedRotateAxisElement(name = "euler_z", axis = Vector((0,0,1)), angle = obj.rotation_euler[2])]
callback.stacked_transforms.append(rotation_keys[ord(obj.rotation_mode[2]) - ord('X')])
callback.stacked_transforms.append(rotation_keys[ord(obj.rotation_mode[1]) - ord('X')])
callback.stacked_transforms.append(rotation_keys[ord(obj.rotation_mode[0]) - ord('X')])
if rotation_mode == "QUATERNION":
q = StackedQuaternionElement()
q.quaternion = obj.rotation_quaternion
callback.stacked_transforms.append(q)
if rotation_mode == "AXIS_ANGLE":
callback.stacked_transforms.append(StackedRotateAxisElement(name = "axis_angle",
axis = Vector(obj.rotation_axis_angle[0:2]),
angle = obj.rotation_axis_angle[3]))
sc = StackedScaleElement()
sc.scale = Vector(obj.scale)
callback.stacked_transforms.append(sc)
return callback
def createAnimationsGenericObject(osg_object, blender_object, config, update_callback, unique_objects):
if (config.export_anim is False) or (update_callback is None) or (blender_object.animation_data is None):
return None
if unique_objects.hasAnimation(blender_object.animation_data.action):
return None
action2animation = BlenderAnimationToAnimation(object = blender_object,
config = config,
unique_objects = unique_objects)
anim = action2animation.createAnimation()
if len(anim) > 0:
osg_object.update_callbacks.append(update_callback)
return anim
def createAnimationMaterialAndSetCallback(osg_node, obj, config, unique_objects):
osglog.log("WARNING update material animation not yet supported")
return None
#return createAnimationsGenericObject(osg_node, obj, config, UpdateMaterial(), uniq_anims)
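# Registry of already-converted statesets, textures, objects and animations,
# keyed by their source Blender datablock, so each one is only exported once.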
class UniqueObject(object):
def __init__(self):
self.statesets = {}
self.textures = {}
self.objects = {}
self.anims = {}
def hasAnimation(self, obj):
return obj in self.anims
def getAnimation(self, obj):
if self.hasAnimation(obj):
return self.anims[obj]
return None
def registerAnimation(self, obj, reg):
self.anims[obj] = reg
def hasObject(self, obj):
return obj in self.objects
def getObject(self, obj):
if self.hasObject(obj):
return self.objects[obj]
return None
def registerObject(self, obj, reg):
self.objects[obj] = reg
def hasTexture(self, obj):
return obj in self.textures
def getTexture(self, obj):
if self.hasTexture(obj):
return self.textures[obj]
return None
def registerTexture(self, obj, reg):
self.textures[obj] = reg
def hasStateSet(self, obj):
return obj in self.statesets
def getStateSet(self, obj):
if self.hasStateSet(obj):
return self.statesets[obj]
return None
def registerStateSet(self, obj, reg):
self.statesets[obj] = reg
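# Drives the export: walks the scene, converts objects, collects animations,
# lights and images, then post-processes the graph and writes it to disk.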
class Export(object):
def __init__(self, config = None):
object.__init__(self)
self.items = []
self.config = config
if self.config is None:
self.config = osgconf.Config()
self.rest_armatures = []
self.animations = []
self.images = set()
self.lights = {}
self.root = None
self.unique_objects = UniqueObject()
def isValidToExport(self, object):
if object.name in self.config.exclude_objects:
return False
        if self.config.only_visible:
            return object.is_visible(self.config.scene)
        return True
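    # Armatures are switched to rest position while exporting so matrices are
    # computed in the bind pose; restoreArmaturePoseMode switches them back.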
def setArmatureInRestMode(self):
for arm in bpy.data.objects:
if arm.type == "ARMATURE":
print(arm)
if arm.data.pose_position == 'POSE':
arm.data.pose_position = 'REST'
self.rest_armatures.append(arm)
def restoreArmaturePoseMode(self):
for arm in self.rest_armatures:
arm.data.pose_position = 'POSE'
def exportItemAndChildren(self, obj):
item = self.exportChildrenRecursively(obj, None, None)
if item is not None:
self.items.append(item)
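    # Expand a dupli-group: add a MatrixTransform compensating the group's
    # dupli_offset and export every object of the group beneath it.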
def evaluateGroup(self, obj, item, rootItem):
if obj.dupli_group is None or len(obj.dupli_group.objects) == 0:
return
osglog.log(str("resolving " + obj.dupli_group.name + " for " + obj.name + " offset " + str(obj.dupli_group.dupli_offset)) )
group = MatrixTransform()
group.matrix = Matrix.Translation(-obj.dupli_group.dupli_offset)
item.children.append(group)
        # for groups we temporarily disable the only_visible option
config_visible = self.config.only_visible
self.config.only_visible = False
for o in obj.dupli_group.objects:
osglog.log(str("object " + str(o)))
self.exportChildrenRecursively( o, group, rootItem)
self.config.only_visible = config_visible
# and restore it after processing group
def getName(self, obj):
if hasattr(obj, "name"):
return obj.name
return "no name"
def createAnimationsSkeletonObject(self, osg_object, blender_object):
if (self.config.export_anim is False) or (blender_object.animation_data == None) or (blender_object.animation_data.action == None):
return None
if self.unique_objects.hasAnimation(blender_object.animation_data.action):
return None
osglog.log("animation_data is %s %s" % (blender_object.name, blender_object.animation_data))
action2animation = BlenderAnimationToAnimation(object = blender_object, config = self.config, unique_objects = self.unique_objects)
osglog.log("animations created for object '%s'" % (blender_object.name))
anims = action2animation.createAnimation()
return anims
def createAnimationsObjectAndSetCallback(self, osg_object, blender_object):
return createAnimationsGenericObject(osg_object, blender_object, self.config,
createAnimationUpdate(blender_object, UpdateMatrixTransform(name=osg_object.name), blender_object.rotation_mode),
self.unique_objects)
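    # Depth-first export of an object and its children; builds the matching
    # osg node, hooks it under its parent (or parent bone) and returns it.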
def exportChildrenRecursively(self, obj, parent, rootItem):
if self.isValidToExport(obj) == False:
return None
osglog.log("")
anims = []
item = None
if self.unique_objects.hasObject(obj):
osglog.log(str("use referenced item for " + obj.name + " " + obj.type))
item = self.unique_objects.getObject(obj)
else:
osglog.log("Type of " + obj.name + " is " + obj.type)
if obj.type == "ARMATURE":
item = self.createSkeleton(obj)
anims = self.createAnimationsSkeletonObject(item, obj)
elif obj.type == "MESH" or obj.type == "EMPTY" or obj.type == "CAMERA":
                # because Blender can insert an inverse parent matrix, we have to recompute the
                # parent/child matrix ourselves. If an armature is involved we force it into
                # rest position so the matrix is computed in the right space
matrix = getDeltaMatrixFrom(obj.parent, obj)
item = MatrixTransform()
item.setName(obj.name)
item.matrix = matrix.copy()
if self.config.zero_translations and parent == None:
if bpy.app.version[0] >= 2 and bpy.app.version[1] >= 62:
print("zero_translations option has not been converted to blender 2.62")
else:
item.matrix[3].xyz = Vector()
anims = self.createAnimationsObjectAndSetCallback(item, obj)
if obj.type == "MESH":
objectItem = self.createGeodeFromObject(obj)
item.children.append(objectItem)
else:
self.evaluateGroup(obj, item, rootItem)
elif obj.type == "LAMP":
matrix = getDeltaMatrixFrom(obj.parent, obj)
item = MatrixTransform()
item.setName(obj.name)
item.matrix = matrix
lightItem = self.createLight(obj)
anims = self.createAnimationsObjectAndSetCallback(item, obj)
item.children.append(lightItem)
else:
osglog.log(str("WARNING " + obj.name + " " + obj.type + " not exported"))
return None
self.unique_objects.registerObject(obj, item)
if anims != None:
self.animations += [a for a in anims if a != None]
if rootItem is None:
rootItem = item
if obj.parent_type == "BONE":
bone = findBoneInHierarchy(rootItem, obj.parent_bone)
if bone is None:
osglog.log(str("WARNING " + obj.parent_bone + " not found"))
else:
armature = obj.parent.data
original_pose_position = armature.pose_position
armature.pose_position = 'REST'
boneInWorldSpace = obj.parent.matrix_world * armature.bones[obj.parent_bone].matrix_local
matrix = getDeltaMatrixFromMatrix(boneInWorldSpace, obj.matrix_world)
item.matrix = matrix
bone.children.append(item)
armature.pose_position = original_pose_position
elif parent:
parent.children.append(item)
children = getChildrenOf(self.config.scene, obj)
for child in children:
self.exportChildrenRecursively(child, item, rootItem)
return item
def createSkeleton(self, obj):
osglog.log("processing Armature " + obj.name)
roots = getRootBonesList(obj.data)
matrix = getDeltaMatrixFrom(obj.parent, obj)
skeleton = Skeleton(obj.name, matrix)
for bone in roots:
b = Bone(obj, bone)
b.buildBoneChildren()
skeleton.children.append(b)
skeleton.collectBones()
return skeleton
def process(self):
# Object.resetWriter()
self.scene_name = self.config.scene.name
osglog.log("current scene %s" % self.scene_name)
if self.config.validFilename() is False:
self.config.filename += self.scene_name
self.config.createLogfile()
self.setArmatureInRestMode()
try:
if self.config.object_selected != None:
o = bpy.data.objects[self.config.object_selected]
try:
self.config.scene.objects.active = o
self.config.scene.objects.selected = [o]
except ValueError:
osglog.log("Error, problem happens when assigning object %s to scene %s" % (o.name, self.config.scene.name))
raise
for obj in self.config.scene.objects:
if (self.config.selected == "SELECTED_ONLY_WITH_CHILDREN" and obj.select) \
or (self.config.selected == "ALL" and obj.parent == None):
self.exportItemAndChildren(obj)
finally:
self.restoreArmaturePoseMode()
self.postProcess()
# OSG requires that rig geometry be a child of the skeleton,
# but Blender does not. Move any meshes that are modified by
# an armature to be under the armature.
def reparentRiggedGeodes(self, item, parent):
if isinstance(item, MatrixTransform) \
and len(item.children) == 1 \
and isinstance(item.children[0], Geode) \
and not isinstance(parent, Skeleton):
geode = item.children[0]
osglog.log("geode {}".format(geode.name))
            # some blend files have an armature_modifier but a None object,
            # so we have to test both armature_modifier and armature_modifier.object
if geode.armature_modifier != None and geode.armature_modifier.object:
parent.children.remove(item)
modifier_object = item.children[0].armature_modifier.object
arm = self.unique_objects.getObject(modifier_object)
for (k, v) in self.unique_objects.objects.items():
if v == item:
meshobj = k
item.matrix = getDeltaMatrixFromMatrix(item.children[0].armature_modifier.object.matrix_world, meshobj.matrix_world)
arm.children.append(item)
osglog.log("NOTICE: Reparenting {} to {}".format(geode.name, arm.name))
if hasattr(item, "children"):
for c in list(item.children):
self.reparentRiggedGeodes(c, item)
def postProcess(self):
# set only one root to the scene
self.root = None
self.root = Group()
self.root.setName("Root")
self.root.children = self.items
if len(self.animations) > 0:
animation_manager = BasicAnimationManager()
animation_manager.animations = self.animations
self.root.update_callbacks.append(animation_manager)
self.reparentRiggedGeodes(self.root, None)
# index light num for opengl use and enable them in a stateset
if len(self.lights) > 0:
st = StateSet()
self.root.stateset = st
if len(self.lights) > 8:
osglog.log("WARNING more than 8 lights")
# retrieve world to global ambient
lm = LightModel()
lm.ambient = (1.0, 1.0, 1.0, 1.0)
if self.config.scene.world is not None:
amb = self.config.scene.world.ambient_color
lm.ambient = (amb[0], amb[1], amb[2], 1.0)
st.attributes.append(lm)
# add by default
st.attributes.append(Material())
light_num = 0
for name, ls in self.lights.items():
ls.light.light_num = light_num
key = "GL_LIGHT{}".format(light_num)
st.modes[key] = "ON"
light_num += 1
for key in self.unique_objects.statesets.keys():
stateset = self.unique_objects.statesets[key]
if stateset is not None: # register images to unpack them at the end
images = getImageFilesFromStateSet(stateset)
for i in images:
self.images.add(i)
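    # Serialize the scene to a .osgt file, copy or unpack referenced images
    # next to it, optionally convert to .ive with osgconv and run the viewer.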
def write(self):
if len(self.items) == 0:
if self.config.log_file is not None:
self.config.closeLogfile()
return
filename = self.config.getFullName("osgt")
osglog.log("write file to " + filename)
with open(filename, "wb") as sfile:
#sfile.write(str(self.root).encode('utf-8'))
self.root.writeFile(sfile)
nativePath = os.path.join(os.path.abspath(self.config.getFullPath()), self.config.texture_prefix)
#blenderPath = bpy.path.relpath(nativePath)
if len(self.images) > 0:
try:
if not os.path.exists(nativePath):
os.mkdir(nativePath)
except:
osglog.log("can't create textures directory {}".format(nativePath))
raise
copied_images = []
for i in self.images:
if i is not None:
imagename = bpy.path.basename(createImageFilename("", i))
try:
if i.packed_file:
original_filepath = i.filepath_raw
try:
if len(imagename.split('.')) == 1:
imagename += ".png"
filename = os.path.join(nativePath, imagename)
if not os.path.exists(filename):
# record which images that were newly copied and can be safely
# cleaned up
copied_images.append(filename)
i.filepath_raw = filename
osglog.log("packed file, save it to {}".format(os.path.abspath(bpy.path.abspath(filename))))
i.save()
except:
osglog.log("failed to save file {} to {}".format(imagename, nativePath))
i.filepath_raw = original_filepath
else:
filepath = os.path.abspath(bpy.path.abspath(i.filepath))
texturePath = os.path.join(nativePath, imagename)
if os.path.exists(filepath):
if not os.path.exists(texturePath):
# record which images that were newly copied and can be safely
# cleaned up
copied_images.append(texturePath)
shutil.copy(filepath, texturePath)
osglog.log("copy file {} to {}".format(filepath, texturePath))
else:
osglog.log("file {} not available".format(filepath))
except Exception as e:
osglog.log("error while trying to copy file {} to {}: {}".format(imagename, nativePath, str(e)))
filetoview = self.config.getFullName("osgt")
if self.config.osgconv_to_ive:
if self.config.osgconv_embed_textures:
r = [self.config.osgconv_path, "-O", "includeImageFileInIVEFile", self.config.getFullName("osgt"), self.config.getFullName("ive")]
else:
r = [self.config.osgconv_path, "-O", "noTexturesInIVEFile", self.config.getFullName("osgt"), self.config.getFullName("ive")]
try:
if subprocess.call(r) == 0:
filetoview = self.config.getFullName("ive")
if self.config.osgconv_cleanup:
os.unlink(self.config.getFullName("osgt"))
if self.config.osgconv_embed_textures:
for i in copied_images:
os.unlink(i)
except Exception as e:
print("Error running " + str(r))
print(repr(e))
if self.config.run_viewer:
r = [self.config.viewer_path, filetoview]
try:
subprocess.Popen(r)
except Exception as e:
print("Error running " + str(r))
print(repr(e))
if self.config.log_file is not None:
self.config.closeLogfile()
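    # Convert a mesh object into a Geode; geometry deformed by an armature
    # modifier is wrapped in a RigGeometry so it can be skinned.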
def createGeodeFromObject(self, mesh, skeleton = None):
osglog.log("exporting object " + mesh.name)
        # check if the mesh has an armature modifier
        # if not, we don't write influences
exportInfluence = False
#if mesh.parent and mesh.parent.type == "ARMATURE":
# exportInfluence = True
armature_modifier = None
has_non_armature_modifiers = False
for mod in mesh.modifiers:
if mod.type == "ARMATURE":
armature_modifier = mod
else:
has_non_armature_modifiers = True
if armature_modifier != None:
exportInfluence = True
if self.config.apply_modifiers and has_non_armature_modifiers:
mesh_object = mesh.to_mesh(self.config.scene, True, 'PREVIEW')
else:
mesh_object = mesh.data
osglog.log("mesh_object is " + mesh_object.name)
if self.unique_objects.hasObject(mesh_object):
return self.unique_objects.getObject(mesh_object)
hasVertexGroup = False
for vertex in mesh_object.vertices:
if len(vertex.groups) > 0:
hasVertexGroup = True
break
geometries = []
converter = BlenderObjectToGeometry(object = mesh, mesh = mesh_object,
config = self.config,
unique_objects = self.unique_objects)
sources_geometries = converter.convert()
osglog.log("vertex groups %s %s " % (exportInfluence, hasVertexGroup))
if exportInfluence and hasVertexGroup:
for geom in sources_geometries:
rig_geom = RigGeometry()
rig_geom.sourcegeometry = geom
rig_geom.copyFrom(geom)
rig_geom.groups = geom.groups
geometries.append(rig_geom)
else:
geometries = sources_geometries
geode = Geode()
geode.setName(mesh_object.name)
geode.armature_modifier = armature_modifier
if len(geometries) > 0:
for geom in geometries:
geode.drawables.append(geom)
for name in converter.material_animations.keys():
self.animations.append(converter.material_animations[name])
self.unique_objects.registerObject(mesh_object, geode)
return geode
def createLight(self, obj):
converter = BlenderLightToLightSource(lamp=obj)
lightsource = converter.convert()
self.lights[lightsource.name] = lightsource # will be used to index lightnum at the end
return lightsource
class BlenderLightToLightSource(object):
def __init__(self, *args, **kwargs):
self.object = kwargs["lamp"]
self.lamp = self.object.data
def convert(self):
ls = LightSource()
ls.setName(self.object.name)
light = ls.light
energy = self.lamp.energy
light.ambient = (1.0, 1.0, 1.0, 1.0)
if self.lamp.use_diffuse:
light.diffuse = (self.lamp.color[0] * energy, self.lamp.color[1]* energy, self.lamp.color[2] * energy,1.0)
else:
light.diffuse = (0, 0, 0, 1.0)
if self.lamp.use_specular:
light.specular = (energy, energy, energy, 1.0) #light.diffuse
else:
light.specular = (0, 0, 0, 1.0)
light.getOrCreateUserData().append(StringValueObject("source", "blender"))
light.getOrCreateUserData().append(StringValueObject("Energy", str(energy)))
light.getOrCreateUserData().append(StringValueObject("Color", "[ %f, %f, %f ]" % (self.lamp.color[0], self.lamp.color[1], self.lamp.color[2])))
if self.lamp.use_diffuse:
light.getOrCreateUserData().append(StringValueObject("UseDiffuse", "true"))
else:
light.getOrCreateUserData().append(StringValueObject("UseDiffuse", "false"))
if self.lamp.use_specular:
light.getOrCreateUserData().append(StringValueObject("UseSpecular", "true"))
else:
light.getOrCreateUserData().append(StringValueObject("UseSpecular", "false"))
light.getOrCreateUserData().append(StringValueObject("Distance", str(self.lamp.distance)))
if self.lamp.type == 'POINT' or self.lamp.type == "SPOT":
light.getOrCreateUserData().append(StringValueObject("FalloffType", str(self.lamp.falloff_type)))
light.getOrCreateUserData().append(StringValueObject("UseSphere", (str(self.lamp.use_sphere)).lower()))
light.getOrCreateUserData().append(StringValueObject("Type", (str(self.lamp.type))))
        # lamp types: 'Lamp', 'Sun', 'Spot', 'Hemi', 'Area', or 'Photon'
if self.lamp.type == 'POINT' or self.lamp.type == 'SPOT':
# position light
# Note DW - the distance may not be necessary anymore (blender 2.5)
light.position = (0,0,0,1) # put light to vec3(0) it will inherit the position from parent transform
light.linear_attenuation = self.lamp.linear_attenuation / self.lamp.distance
light.quadratic_attenuation = self.lamp.quadratic_attenuation / self.lamp.distance
if self.lamp.falloff_type == 'CONSTANT':
light.quadratic_attenuation = 0
light.linear_attenuation = 0
if self.lamp.falloff_type == 'INVERSE_SQUARE':
light.constant_attenuation = 0
light.linear_attenuation = 0
if self.lamp.falloff_type == 'INVERSE_LINEAR':
light.constant_attenuation = 0
light.quadratic_attenuation = 0
elif self.lamp.type == 'SUN':
light.position = (0,0,1,0) # put light to 0 it will inherit the position from parent transform
if self.lamp.type == 'SPOT':
light.spot_cutoff = math.degrees(self.lamp.spot_size * .5)
if light.spot_cutoff > 90:
light.spot_cutoff = 180
light.spot_exponent = 128.0 * self.lamp.spot_blend
light.getOrCreateUserData().append(StringValueObject("SpotSize", str(self.lamp.spot_size)))
light.getOrCreateUserData().append(StringValueObject("SpotBlend", str(self.lamp.spot_blend)))
return ls
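# Converts one Blender mesh object into a list of osg Geometry objects,
# one Geometry per material slot.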
class BlenderObjectToGeometry(object):
def __init__(self, *args, **kwargs):
self.object = kwargs["object"]
self.config = kwargs.get("config", None)
if not self.config:
self.config = osgconf.Config()
self.unique_objects = kwargs.get("unique_objects", {})
self.geom_type = Geometry
self.mesh = kwargs.get("mesh", None)
#if self.config.apply_modifiers is False:
# self.mesh = self.object.data
#else:
# self.mesh = self.object.to_mesh(self.config.scene, True, 'PREVIEW')
self.material_animations = {}
def createTexture2D(self, mtex):
image_object = None
try:
image_object = mtex.texture.image
except:
image_object = None
if image_object is None:
osglog.log("WARNING the texture %s has no Image, skip it" % str(mtex))
return None
if self.unique_objects.hasTexture(mtex.texture):
return self.unique_objects.getTexture(mtex.texture)
texture = Texture2D()
texture.name = mtex.texture.name
# reference texture relative to export path
filename = createImageFilename(self.config.texture_prefix, image_object)
texture.file = filename
texture.source_image = image_object
self.unique_objects.registerTexture(mtex.texture, texture)
return texture
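    # Remap the geometry's named uv layers to numbered texture units, using
    # the uv_layer assigned to each texture slot of the material.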
def adjustUVLayerFromMaterial(self, geom, material, mesh_uv_textures):
uvs = geom.uvs
if DEBUG: osglog.log("geometry uvs %s" % (str(uvs)))
geom.uvs = {}
texture_list = material.texture_slots
if DEBUG: osglog.log("texture list %d - %s" % (len(texture_list), str(texture_list)))
        # find a default uv channel if one exists
default_uv = None
default_uv_key = None
if (len(mesh_uv_textures)) > 0:
default_uv_key = mesh_uv_textures[0].name
default_uv = uvs[default_uv_key]
#default_uv_key, default_uv = uvs.popitem()
if DEBUG: osglog.log("default uv key %s" % str(default_uv_key))
for i in range(0, len(texture_list)):
texture_slot = texture_list[i]
if texture_slot is not None:
uv_layer = texture_slot.uv_layer
if DEBUG: osglog.log("uv layer %s" % str(uv_layer))
if len(uv_layer) > 0 and not uv_layer in uvs.keys():
osglog.log("WARNING your material '%s' with texture '%s' use an uv layer '%s' that does not exist on the mesh '%s', use the first uv channel as fallback" % (material.name, texture_slot, uv_layer, geom.name))
if len(uv_layer) > 0 and uv_layer in uvs.keys():
if DEBUG: osglog.log("texture %s use uv layer %s" % (i, uv_layer))
geom.uvs[i] = TexCoordArray()
geom.uvs[i].array = uvs[uv_layer].array
geom.uvs[i].index = i
elif default_uv:
if DEBUG: osglog.log("texture %s use default uv layer %s" % (i, default_uv_key))
geom.uvs[i] = TexCoordArray()
geom.uvs[i].index = i
geom.uvs[i].array = default_uv.array
# adjust uvs channels if no textures assigned
if len(geom.uvs.keys()) == 0:
if DEBUG: osglog.log("no texture set, adjust uvs channels, in arbitrary order")
index = 0
for k in uvs.keys():
uvs[k].index = index
index += 1
geom.uvs = uvs
return
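    # Build (and cache) a StateSet and Material from a Blender material; the
    # Blender-specific settings are stored as string user data.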
def createStateSet(self, index_material, mesh, geom):
if len(mesh.materials) == 0:
return None
mat_source = mesh.materials[index_material]
if self.unique_objects.hasStateSet(mat_source):
return self.unique_objects.getStateSet(mat_source)
if mat_source is None:
return None
s = StateSet()
s.dataVariance = "DYNAMIC"
self.unique_objects.registerStateSet(mat_source, s)
m = Material()
m.dataVariance = "DYNAMIC"
m.setName(mat_source.name)
s.setName(mat_source.name)
#bpy.ops.object.select_name(name=self.object.name)
anim = createAnimationMaterialAndSetCallback(m, mat_source, self.config, self.unique_objects)
if anim :
self.material_animations[anim.name] = anim
if mat_source.use_shadeless:
s.modes["GL_LIGHTING"] = "OFF"
alpha = 1.0
if mat_source.use_transparency:
alpha = 1.0 - mat_source.alpha
refl = mat_source.diffuse_intensity
        # we premultiply the color by the intensity so the OpenGL fixed pipeline renders close to Blender
m.diffuse = (mat_source.diffuse_color[0] * refl, mat_source.diffuse_color[1] * refl, mat_source.diffuse_color[2] * refl, alpha)
m.getOrCreateUserData().append(StringValueObject("source", "blender"))
m.getOrCreateUserData().append(StringValueObject("DiffuseIntensity", str(mat_source.diffuse_intensity)))
m.getOrCreateUserData().append(StringValueObject("DiffuseColor", "[ %f, %f, %f ]" % (mat_source.diffuse_color[0], mat_source.diffuse_color[1], mat_source.diffuse_color[2])))
m.getOrCreateUserData().append(StringValueObject("SpecularIntensity", str(mat_source.specular_intensity)))
#print ("%s SpecularIntensity %s" % (m.name, str(mat_source.specular_intensity)))
m.getOrCreateUserData().append(StringValueObject("SpecularColor", "[ %f, %f, %f ]" % (mat_source.specular_color[0], mat_source.specular_color[1], mat_source.specular_color[2])))
m.getOrCreateUserData().append(StringValueObject("SpecularHardness", str(mat_source.specular_hardness)))
if mat_source.use_shadeless:
m.getOrCreateUserData().append(StringValueObject("Shadeless", "true"))
else:
m.getOrCreateUserData().append(StringValueObject("Emit", str(mat_source.emit)))
m.getOrCreateUserData().append(StringValueObject("Ambient", str(mat_source.ambient)))
m.getOrCreateUserData().append(StringValueObject("Translucency", str(mat_source.translucency)))
m.getOrCreateUserData().append(StringValueObject("DiffuseShader", str(mat_source.diffuse_shader)))
m.getOrCreateUserData().append(StringValueObject("SpecularShader", str(mat_source.specular_shader)))
if mat_source.use_transparency:
m.getOrCreateUserData().append(StringValueObject("Transparency", str("true")))
m.getOrCreateUserData().append(StringValueObject("TransparencyMethod", str(mat_source.transparency_method)))
if mat_source.diffuse_shader == "TOON":
m.getOrCreateUserData().append(StringValueObject("DiffuseToonSize", str(mat_source.diffuse_toon_size)))
m.getOrCreateUserData().append(StringValueObject("DiffuseToonSmooth", str(mat_source.diffuse_toon_smooth)))
if mat_source.diffuse_shader == "OREN_NAYAR":
m.getOrCreateUserData().append(StringValueObject("Roughness", str(mat_source.roughness)))
if mat_source.diffuse_shader == "MINNAERT":
m.getOrCreateUserData().append(StringValueObject("Darkness", str(mat_source.roughness)))
if mat_source.diffuse_shader == "FRESNEL":
m.getOrCreateUserData().append(StringValueObject("DiffuseFresnel", str(mat_source.diffuse_fresnel)))
m.getOrCreateUserData().append(StringValueObject("DiffuseFresnelFactor", str(mat_source.diffuse_fresnel_factor)))
# specular
if mat_source.specular_shader == "TOON":
m.getOrCreateUserData().append(StringValueObject("SpecularToonSize", str(mat_source.specular_toon_size)))
m.getOrCreateUserData().append(StringValueObject("SpecularToonSmooth", str(mat_source.specular_toon_smooth)))
if mat_source.specular_shader == "WARDISO":
m.getOrCreateUserData().append(StringValueObject("SpecularSlope", str(mat_source.specular_slope)))
if mat_source.specular_shader == "BLINN":
m.getOrCreateUserData().append(StringValueObject("SpecularIor", str(mat_source.specular_ior)))
# if alpha not 1 then we set the blending mode on
if DEBUG: osglog.log("state material alpha %s" % str(alpha))
if alpha != 1.0:
s.modes["GL_BLEND"] = "ON"
ambient_factor = mat_source.ambient
if bpy.context.scene.world:
m.ambient =((bpy.context.scene.world.ambient_color[0])*ambient_factor,
(bpy.context.scene.world.ambient_color[1])*ambient_factor,
(bpy.context.scene.world.ambient_color[2])*ambient_factor,
1.0)
else:
m.ambient = (0, 0, 0, 1.0)
        # we premultiply the color by the intensity so the OpenGL fixed pipeline renders close to Blender
spec = mat_source.specular_intensity
m.specular = (mat_source.specular_color[0] * spec, mat_source.specular_color[1] * spec, mat_source.specular_color[2] * spec, 1)
emissive_factor = mat_source.emit
m.emission = (mat_source.diffuse_color[0] * emissive_factor, mat_source.diffuse_color[1] * emissive_factor, mat_source.diffuse_color[2] * emissive_factor, 1)
m.shininess = (mat_source.specular_hardness / 512.0) * 128.0
s.attributes.append(m)
texture_list = mat_source.texture_slots
if DEBUG: osglog.log("texture list %s" % str(texture_list))
if len(texture_list) > 0:
userData = s.getOrCreateUserData()
userData.append(StringValueObject("source", "blender"))
for i in range(0, len(texture_list)):
texture_slot = texture_list[i]
if texture_slot is None:
continue
t = self.createTexture2D(texture_list[i])
if DEBUG: osglog.log("texture %s %s" % (i, texture_list[i]))
if t is None:
continue
def premultAlpha(texture_slot, i, userData):
if texture_slot.texture and texture_slot.texture.image and texture_slot.texture.image.use_premultiply:
v = "false"
if texture_slot.texture.image.use_premultiply:
v = "true"
userData.append(StringValueObject("%02d_UsePremultiplyAlpha" % i, v))
def useAlpha(texture_slot, i, userData):
if texture_slot.texture and texture_slot.texture.use_alpha:
v = "true"
userData.append(StringValueObject("%02d_UseAlpha" % i, v))
userData = s.getOrCreateUserData()
# use texture as diffuse
if texture_slot.use_map_diffuse:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_DiffuseIntensity" % i, str(texture_slot.diffuse_factor)))
if texture_slot.use_map_color_diffuse:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_DiffuseColor" % i, str(texture_slot.diffuse_color_factor)))
if texture_slot.use_map_alpha:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_Alpha" % i, str(texture_slot.alpha_factor)))
if texture_slot.use_map_translucency:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_Translucency" % i, str(texture_slot.translucency_factor)))
# use texture as specular
if texture_slot.use_map_specular:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_SpecularIntensity" % i, str(texture_slot.specular_factor)))
if texture_slot.use_map_color_spec:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_SpecularColor" % i, str(texture_slot.specular_color_factor)))
# mirror
if texture_slot.use_map_mirror:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_Mirror" % i, str(texture_slot.mirror_factor)))
# use texture as normalmap
if texture_slot.use_map_normal:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_Normal" % i, str(texture_slot.normal_factor)))
if texture_slot.use_map_ambient:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_Ambient" % i, str(texture_slot.ambient_factor)))
if texture_slot.use_map_emit:
premultAlpha(texture_slot, i, userData)
useAlpha(texture_slot, i, userData)
userData.append(StringValueObject("%02d_Emit" % i, str(texture_slot.emit_factor)))
# use blend
userData.append(StringValueObject("%02d_BlendType" % i, str(texture_slot.blend_type)))
if not i in s.texture_attributes.keys():
s.texture_attributes[i] = []
s.texture_attributes[i].append(t)
try:
if t.source_image.getDepth() > 24: # there is an alpha
s.modes["GL_BLEND"] = "ON"
except:
pass
# happens for all generated textures
#log("can't read the source image file for texture %s" % t.name)
#if DEBUG: osglog.log("state set %s" % str(s))
return s
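    # Convert the faces of one material slot into an osg Geometry: collect
    # per-face-vertex attributes, merge identical vertexes, then emit
    # line/triangle/quad primitives.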
def createGeomForMaterialIndex(self, material_index, mesh):
geom = Geometry()
geom.groups = {}
if bpy.app.version[0] >= 2 and bpy.app.version[1] >= 63:
faces = mesh.tessfaces
else:
faces = mesh.faces
if (len(faces) == 0):
osglog.log("object %s has no faces, so no materials" % self.object.name)
return None
if len(mesh.materials) and mesh.materials[material_index] != None:
material_name = mesh.materials[material_index].name
title = "mesh %s with material %s" % (self.object.name, material_name)
else:
title = "mesh %s without material" % (self.object.name)
osglog.log(title)
vertexes = []
collected_faces = []
for face in faces:
if face.material_index != material_index:
continue
f = []
if DEBUG: fdebug = []
for vertex in face.vertices:
index = len(vertexes)
vertexes.append(mesh.vertices[vertex])
f.append(index)
if DEBUG: fdebug.append(vertex)
if DEBUG: osglog.log("true face %s" % str(fdebug))
if DEBUG: osglog.log("face %s" % str(f))
collected_faces.append((face,f))
if (len(collected_faces) == 0):
osglog.log("object %s has no faces for sub material slot %s" % (self.object.name, str(material_index)))
end_title = '-' * len(title)
osglog.log(end_title)
return None
# colors = {}
# if mesh.vertex_colors:
# names = mesh.getColorLayerNames()
# backup_name = mesh.activeColorLayer
# for name in names:
# mesh.activeColorLayer = name
# mesh.update()
# color_array = []
# for face,f in collected_faces:
# for i in range(0, len(face.vertices)):
# color_array.append(face.col[i])
# colors[name] = color_array
# mesh.activeColorLayer = backup_name
# mesh.update()
colors = {}
vertex_colors = None
if bpy.app.version[0] >= 2 and bpy.app.version[1] >= 63:
vertex_colors = mesh.tessface_vertex_colors
else:
vertex_colors = mesh.vertex_colors
if vertex_colors:
backupColor = None
for colorLayer in vertex_colors:
if colorLayer.active:
backupColor = colorLayer
for colorLayer in vertex_colors:
idx = 0
colorLayer.active= True
#mesh.update()
color_array = []
for data in colorLayer.data:
color_array.append(data.color1)
color_array.append(data.color2)
color_array.append(data.color3)
# DW - how to tell if this is a tri or a quad?
if len(faces[idx].vertices) > 3:
color_array.append(data.color4)
idx += 1
colors[colorLayer.name] = color_array
backupColor.active = True
#mesh.update()
# uvs = {}
# if mesh.faceUV:
# names = mesh.getUVLayerNames()
# backup_name = mesh.activeUVLayer
# for name in names:
# mesh.activeUVLayer = name
# mesh.update()
# uv_array = []
# for face,f in collected_faces:
# for i in range(0, len(face.vertices)):
# uv_array.append(face.uv[i])
# uvs[name] = uv_array
# mesh.activeUVLayer = backup_name
# mesh.update()
uv_textures = None
if bpy.app.version[0] >= 2 and bpy.app.version[1] >= 63:
uv_textures = mesh.tessface_uv_textures
else:
uv_textures = mesh.uv_textures
uvs = {}
if uv_textures:
backup_texture = None
for textureLayer in uv_textures:
if textureLayer.active:
backup_texture = textureLayer
for textureLayer in uv_textures:
textureLayer.active = True
#mesh.update()
uv_array = []
for face,f in collected_faces:
data = textureLayer.data[face.index]
uv_array.append(data.uv1)
uv_array.append(data.uv2)
uv_array.append(data.uv3)
if len(face.vertices) > 3:
uv_array.append(data.uv4)
uvs[textureLayer.name] = uv_array
backup_texture.active = True
#mesh.update()
normals = []
for face,f in collected_faces:
if face.use_smooth:
for vert in face.vertices:
normals.append(mesh.vertices[vert].normal)
else:
for vert in face.vertices:
normals.append(face.normal)
mapping_vertexes = []
merged_vertexes = []
tagged_vertexes = []
for i in range(0,len(vertexes)):
merged_vertexes.append(i)
tagged_vertexes.append(False)
def truncateFloat(value, digit = 5):
return round(value, digit)
def truncateVector(vector, digit = 5):
for i in range(0,len(vector)):
vector[i] = truncateFloat(vector[i], digit)
return vector
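        # A vertex is identified by its rounded position, normal and uv
        # coordinates; vertexes sharing the same key are merged below.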
def get_vertex_key(index):
return (
(truncateFloat(vertexes[index].co[0]), truncateFloat(vertexes[index].co[1]), truncateFloat(vertexes[index].co[2])),
(truncateFloat(normals[index][0]), truncateFloat(normals[index][1]), truncateFloat(normals[index][2])),
tuple([tuple(truncateVector(uvs[x][index])) for x in uvs.keys()]))
# vertex color not supported
#tuple([tuple(truncateVector(colors[x][index])) for x in colors.keys()]))
# Build a dictionary of indexes to all the vertexes that
# are equal.
vertex_dict = {}
for i in range(0, len(vertexes)):
key = get_vertex_key(i)
if DEBUG: osglog.log("key %s" % str(key))
if key in vertex_dict.keys():
vertex_dict[key].append(i)
else:
vertex_dict[key] = [i]
for i in range(0, len(vertexes)):
if tagged_vertexes[i] is True: # avoid processing more than one time a vertex
continue
index = len(mapping_vertexes)
merged_vertexes[i] = index
mapping_vertexes.append([i])
if DEBUG: osglog.log("process vertex %s" % i)
vertex_indexes = vertex_dict[get_vertex_key(i)]
for j in vertex_indexes:
if j <= i:
continue
if tagged_vertexes[j] is True: # avoid processing more than one time a vertex
continue
if DEBUG: osglog.log(" vertex %s is the same" % j)
merged_vertexes[j] = index
tagged_vertexes[j] = True
mapping_vertexes[index].append(j)
if DEBUG:
for i in range(0, len(mapping_vertexes)):
osglog.log("vertex %s contains %s" % (str(i), str(mapping_vertexes[i])))
if len(mapping_vertexes) != len(vertexes):
osglog.log("vertexes reduced from %s to %s" % (str(len(vertexes)),len(mapping_vertexes)))
else:
osglog.log("vertexes %s" % str(len(vertexes)))
faces = []
for (original, face) in collected_faces:
f = []
if DEBUG: fdebug = []
for v in face:
f.append(merged_vertexes[v])
if DEBUG: fdebug.append(vertexes[mapping_vertexes[merged_vertexes[v]][0]].index)
faces.append(f)
if DEBUG: osglog.log("new face %s" % str(f))
if DEBUG: osglog.log("true face %s" % str(fdebug))
osglog.log("faces %s" % str(len(faces)))
vgroups = {}
original_vertexes2optimized = {}
for i in range(0, len(mapping_vertexes)):
for k in mapping_vertexes[i]:
index = vertexes[k].index
if not index in original_vertexes2optimized.keys():
original_vertexes2optimized[index] = []
original_vertexes2optimized[index].append(i)
# for i in mesh.getVertGroupNames():
# verts = {}
# for idx, weight in mesh.getVertsFromGroup(i, 1):
# if weight < 0.001:
# log( "WARNING " + str(idx) + " to has a weight too small (" + str(weight) + "), skipping vertex")
# continue
# if idx in original_vertexes2optimized.keys():
# for v in original_vertexes2optimized[idx]:
# if not v in verts.keys():
# verts[v] = weight
# #verts.append([v, weight])
# if len(verts) == 0:
# log( "WARNING " + str(i) + " has not vertexes, skip it, if really unsued you should clean it")
# else:
# vertex_weight_list = [ list(e) for e in verts.items() ]
# vg = VertexGroup()
# vg.targetGroupName = i
# vg.vertexes = vertex_weight_list
# vgroups[i] = vg
#blenObject = None
#for obj in bpy.context.blend_data.objects:
# if obj.data == mesh:
# blenObject = obj
for vertex_group in self.object.vertex_groups:
#osglog.log("Look at vertex group: " + repr(vertex_group))
verts = {}
for idx in range(0, len(mesh.vertices)):
weight = 0
for vg in mesh.vertices[idx].groups:
if vg.group == vertex_group.index:
weight = vg.weight
if weight >= 0.001:
if idx in original_vertexes2optimized.keys():
for v in original_vertexes2optimized[idx]:
if not v in verts.keys():
verts[v] = weight
if len(verts) == 0:
osglog.log( "WARNING group has no vertexes, skip it, if really unsued you should clean it")
else:
vertex_weight_list = [ list(e) for e in verts.items() ]
vg = VertexGroup()
vg.targetGroupName = vertex_group.name
vg.vertexes = vertex_weight_list
vgroups[vertex_group.name] = vg
if (len(vgroups)):
osglog.log("vertex groups %s" % str(len(vgroups)))
geom.groups = vgroups
osg_vertexes = VertexArray()
osg_normals = NormalArray()
osg_uvs = {}
#osg_colors = {}
for vertex in mapping_vertexes:
vindex = vertex[0]
coord = vertexes[vindex].co
osg_vertexes.getArray().append([coord[0], coord[1], coord[2] ])
ncoord = normals[vindex]
osg_normals.getArray().append([ncoord[0], ncoord[1], ncoord[2]])
for name in uvs.keys():
if not name in osg_uvs.keys():
osg_uvs[name] = TexCoordArray()
osg_uvs[name].getArray().append(uvs[name][vindex])
if (len(osg_uvs)):
osglog.log("uvs channels %s - %s" % (len(osg_uvs), str(osg_uvs.keys())))
nlin = 0
ntri = 0
nquad = 0
# counting number of lines, triangles and quads
for face in faces:
nv = len(face)
if nv == 2:
nlin = nlin + 1
elif nv == 3:
ntri = ntri + 1
elif nv == 4:
nquad = nquad + 1
else:
osglog.log("WARNING can't manage faces with %s vertices" % nv)
# counting number of primitives (one for lines, one for triangles and one for quads)
numprims = 0
if (nlin > 0):
numprims = numprims + 1
if (ntri > 0):
numprims = numprims + 1
if (nquad > 0):
numprims = numprims + 1
# Now we write each primitive
primitives = []
if nlin > 0:
lines = DrawElements()
lines.type = "GL_LINES"
nface=0
for face in faces:
nv = len(face)
if nv == 2:
lines.indexes.append(face[0])
lines.indexes.append(face[1])
nface = nface + 1
primitives.append(lines)
if ntri > 0:
triangles = DrawElements()
triangles.type = "GL_TRIANGLES"
nface=0
for face in faces:
nv = len(face)
if nv == 3:
triangles.indexes.append(face[0])
triangles.indexes.append(face[1])
triangles.indexes.append(face[2])
nface = nface + 1
primitives.append(triangles)
if nquad > 0:
quads = DrawElements()
quads.type = "GL_QUADS"
nface=0
for face in faces:
nv = len(face)
if nv == 4:
quads.indexes.append(face[0])
quads.indexes.append(face[1])
quads.indexes.append(face[2])
quads.indexes.append(face[3])
nface = nface + 1
primitives.append(quads)
geom.uvs = osg_uvs
#geom.colors = osg_colors
geom.vertexes = osg_vertexes
geom.normals = osg_normals
geom.primitives = primitives
geom.setName(self.object.name)
stateset = self.createStateSet(material_index, mesh, geom)
if stateset is not None:
geom.stateset = stateset
if len(mesh.materials) > 0 and mesh.materials[material_index] is not None:
self.adjustUVLayerFromMaterial(geom, mesh.materials[material_index], uv_textures)
end_title = '-' * len(title)
osglog.log(end_title)
return geom
def process(self, mesh):
if bpy.app.version[0] >= 2 and bpy.app.version[1] >= 63:
mesh.update(calc_tessface=True)
geometry_list = []
material_index = 0
if len(mesh.materials) == 0:
geom = self.createGeomForMaterialIndex(0, mesh)
if geom is not None:
geometry_list.append(geom)
else:
for material in mesh.materials:
geom = self.createGeomForMaterialIndex(material_index, mesh)
if geom is not None:
geometry_list.append(geom)
material_index += 1
return geometry_list
def convert(self):
# looks like this was dropped
# if self.mesh.vertexUV:
# osglog.log("WARNING mesh %s use sticky UV and it's not supported" % self.object.name)
list = self.process(self.mesh)
return list
class BlenderObjectToRigGeometry(BlenderObjectToGeometry):
def __init__(self, *args, **kwargs):
BlenderObjectToGeometry.__init__(self, *args, **kwargs)
self.geom_type = RigGeometry
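# Converts a Blender action (baked first if needed) into an osgAnimation
# Animation with one channel per animated transform component.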
class BlenderAnimationToAnimation(object):
def __init__(self, *args, **kwargs):
self.config = kwargs["config"]
self.object = kwargs.get("object", None)
self.unique_objects = kwargs.get("unique_objects", {})
self.animations = None
self.action = None
self.action_name = None
def handleAnimationBaking(self):
need_bake = False
if hasattr(self.object, "constraints") and (len(self.object.constraints) > 0) and self.config.bake_constraints:
osglog.log("Baking constraints " + str(self.object.constraints))
need_bake = True
else:
if hasattr(self.object, "animation_data") and hasattr(self.object.animation_data, "action"):
self.action = self.object.animation_data.action
for fcu in self.action.fcurves:
for kf in fcu.keyframe_points:
if kf.interpolation != 'LINEAR':
need_bake = True
if need_bake:
self.action = osgbake.bake(self.config.scene,
self.object,
self.config.scene.frame_start,
self.config.scene.frame_end,
self.config.bake_frame_step,
False, #only_selected
True, #do_pose
True, #do_object
False, #do_constraint_clear
False) #to_quat
def createAnimation(self, target = None):
osglog.log("Exporting animation on object " + str(self.object))
if hasattr(self.object, "animation_data") and hasattr(self.object.animation_data, "action") and self.object.animation_data.action != None:
self.action_name = self.object.animation_data.action.name
self.handleAnimationBaking()
if target == None:
target = self.object.name
anim = self.createAnimationFromAction(target, self.action_name, self.action)
self.unique_objects.registerAnimation(self.action, anim)
return [anim]
def createAnimationFromAction(self, target, name, action):
animation = Animation()
animation.setName(name)
if self.object.type == "ARMATURE":
for bone in self.object.data.bones:
bname = bone.name
osglog.log("%s processing channels for bone %s" % (name, bname))
self.appendChannelsToAnimation(bname, animation, action, prefix=('pose.bones["%s"].' % (bname)))
else:
self.appendChannelsToAnimation(target, animation, action)
return animation
def appendChannelsToAnimation(self, target, anim, action, prefix = ""):
channels = exportActionsToKeyframeSplitRotationTranslationScale(target, action, self.config.anim_fps, prefix)
for i in channels:
anim.channels.append(i)
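# Collect the fcurves matching data_path/array_indexes, take the union of
# their keyframe times and sample every curve at those times to build one
# Channel (Float, Vec3 or Quat flavour depending on the number of indexes).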
def getChannel(target, action, fps, data_path, array_indexes):
times = []
duration = 0
fcurves = []
for array_index in array_indexes:
for fcurve in action.fcurves:
#osglog.log("fcurves %s %s matches %s %s " %(fcurve.data_path, fcurve.array_index, data_path, array_index))
if fcurve.data_path == data_path and fcurve.array_index == array_index:
fcurves.append(fcurve)
#osglog.log("yes")
if len(fcurves) == 0:
return None
for fcurve in fcurves:
for keyframe in fcurve.keyframe_points:
if times.count(keyframe.co[0]) == 0:
times.append(keyframe.co[0])
if len(times) == 0:
return None
channel = Channel()
channel.target = target
if len(array_indexes) == 1:
channel.type = "FloatLinearChannel"
if len(array_indexes) == 3:
channel.type = "Vec3LinearChannel"
if len(array_indexes) == 4:
channel.type = "QuatSphericalLinearChannel"
times.sort()
for time in times:
realtime = (time) / fps
osglog.log("time {} {} {}".format(time, realtime, fps))
# realtime = time
if realtime > duration:
duration = realtime
value = [realtime]
for fcurve in fcurves:
value.append(fcurve.evaluate(time))
channel.keys.append(value)
return channel
# as for blender 2.49
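# Split an action into separate translate / euler / quaternion / axis-angle /
# scale channels, one channel per transform component.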
def exportActionsToKeyframeSplitRotationTranslationScale(target, action, fps, prefix):
channels = []
translate = getChannel(target, action, fps, prefix+"location", [0, 1, 2])
if translate:
translate.setName("translate")
channels.append(translate)
euler = []
eulerName = [ "euler_x", "euler_y", "euler_z"]
for i in range(0,3):
c = getChannel(target, action, fps, prefix+"rotation_euler", [i])
if c:
c.setName(eulerName[i])
channels.append(c)
quaternion = getChannel(target, action, fps, prefix+"rotation_quaternion", [1, 2, 3, 0])
if quaternion:
quaternion.setName("quaternion")
channels.append(quaternion)
axis_angle = getChannel(target, action, fps, prefix+"rotation_axis_angle", [1, 2, 3, 0])
if axis_angle:
axis_angle.setName("axis_angle")
channels.append(axis_angle)
scale = getChannel(target, action, fps, prefix+"scale", [0, 1, 2])
if scale:
scale.setName("scale")
channels.append(scale)
return channels
| gpl-3.0 | 7,495,812,333,147,195,000 | 38.982225 | 227 | 0.560599 | false |
tcstewar/boardgame | boardgame/legendary/hero.py | 1 | 40148 | import copy
import boardgame as bg
import action
from .core import *
from .tags import *
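# Generic grey S.H.I.E.L.D. cards; unlike the named heroes below they do not
# belong to a HeroGroup.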
class ShieldAgent(Hero):
name = 'SHIELD Agent'
star = 1
tags = [Shield]
grey = True
class ShieldTrooper(Hero):
name = 'SHIELD Trooper'
power = 1
tags = [Shield]
grey = True
class ShieldOfficer(Hero):
name = 'SHIELD Officer'
star = 2
cost = 3
tags = [Shield]
grey = True
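# Each hero is a HeroGroup whose fill() builds a 14-card deck:
# 5 + 5 + 3 + 1 copies of its four different cards.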
class IronMan(HeroGroup):
name = 'Iron Man'
def fill(self):
self.add(IronManRepulsor, 5)
self.add(IronManQuantum, 1)
self.add(IronManEndless, 5)
self.add(IronManArc, 3)
class IronManArc(Hero):
name = 'Iron Man: Arc Reactor'
cost = 5
tags = [Avenger, Tech]
power = 3
extra_power = True
desc = 'P+1 per other <Tec> played'
def on_play(self, player):
player.available_power += player.count_played(tag=Tech, ignore=self)
class IronManEndless(Hero):
name = 'Iron Man: Endless Intervention'
cost = 3
tags = [Avenger, Tech]
desc = 'Draw a card. <Tec>: Draw another card'
def on_play(self, player):
player.draw(1)
if player.count_played(tag=Tech, ignore=self) > 0:
player.draw(1)
class IronManQuantum(Hero):
name = 'Iron Man: Quantum Breakthrough'
cost = 7
tags = [Avenger, Tech]
desc = 'Draw two cards. <Tec>: Draw two more cards'
def on_play(self, player):
player.draw(2)
if player.count_played(tag=Tech, ignore=self) > 0:
player.draw(2)
class IronManRepulsor(Hero):
name = 'Iron Man: Repulsor Rays'
cost = 3
tags = [Avenger, Ranged]
power = 2
extra_power = True
desc = '<Rng>: P+1'
def on_play(self, player):
if player.count_played(tag=Ranged, ignore=self) > 0:
player.available_power += 1
class SpiderMan(HeroGroup):
name = 'Spider-Man'
def fill(self):
self.add(SpiderManStrength, 5)
self.add(SpiderManAmazing, 1)
self.add(SpiderManResponsibility, 5)
self.add(SpiderManWeb, 3)
class SpiderManStrength(Hero):
name = 'Spider-Man: Astonishing Strength'
cost = 2
star = 1
tags = [Spider, Strength]
desc = 'Reveal top card. If C<=2, draw it.'
def on_play(self, player):
cards = player.reveal(1)
for c in cards:
if c.cost <= 2:
self.game.event('Spider-Man draws %s' % c)
player.hand.append(c)
else:
player.stack.insert(0, c)
class SpiderManAmazing(Hero):
name = 'Spider-Man: The Amazing Spider-Man'
cost = 2
tags = [Spider, Covert]
desc = 'Reveal top 3 cards. Draw any with C<=2. Return others in any order.'
def on_play(self, player):
cards = player.reveal(3)
actions = []
for c in cards:
if c.cost <= 2:
self.game.event('Spider-Man draws %s' % c)
player.hand.append(c)
else:
actions.append(action.ReturnFrom(c, cards))
if len(actions) <= 1:
for a in actions:
player.stack.insert(0, a.card)
else:
for i in range(len(actions)):
self.game.choice(actions)
class SpiderManResponsibility(Hero):
name = 'Spider-Man: Great Responsibility'
cost = 2
power = 1
tags = [Spider, Instinct]
desc = 'Reveal top card. If C<=2, draw it.'
def on_play(self, player):
cards = player.reveal(1)
for c in cards:
if c.cost <= 2:
self.game.event('Spider-Man draws %s' % c)
player.hand.append(c)
else:
player.stack.insert(0, c)
class SpiderManWeb(Hero):
name = 'Spider-Man: Web Shooters'
cost = 2
tags = [Spider, Tech]
desc = 'Rescue Bystander. Reveal top card. If C<=2, draw it.'
def on_play(self, player):
player.rescue_bystander()
cards = player.reveal(1)
for c in cards:
if c.cost <= 2:
self.game.event('Spider-Man draws %s' % c)
player.hand.append(c)
else:
player.stack.insert(0, c)
class Wolverine2(HeroGroup):
name = 'Wolverine (X-Force)'
def fill(self):
self.add(WolverineAmbush, 5)
self.add(WolverineAnimal, 5)
self.add(WolverineReckless, 1)
self.add(WolverineNoMercy, 3)
class WolverineAmbush(Hero):
name = 'Wolverine: Sudden Ambush'
cost = 4
power = 2
tags = [XForce, Covert]
desc = 'If you drew any extra cards this turn, P+2'
extra_power = True
def on_play(self, player):
if player.extra_draw_count > 0:
player.available_power += 2
class WolverineAnimal(Hero):
name = 'Wolverine: Animal Instincts'
cost = 2
tags = [XForce, Instinct]
desc = 'Draw a card. <Ins>: P+2'
extra_power = True
def on_play(self, player):
player.draw(1)
if player.count_played(tag=Instinct, ignore=self) > 0:
player.available_power += 2
class WolverineReckless(Hero):
name = 'Wolverine: Reckless Abandon'
cost = 7
tags = [XForce, Covert]
power = 3
desc = 'Count how many extra cards you have drawn. Draw that many cards.'
def on_play(self, player):
count = player.extra_draw_count
player.draw(count)
class WolverineNoMercy(Hero):
name = 'Wolverine: No Mercy'
cost = 4
tags = [XForce, Strength]
desc = 'Draw a card. You may KO a card from your hand or discard pile.'
def on_play(self, player):
player.draw(1)
actions = []
for c in player.hand:
actions.append(action.KOFrom(c, player.hand))
for c in player.discard:
actions.append(action.KOFrom(c, player.discard))
self.game.choice(actions, allow_do_nothing=True)
class Hawkeye(HeroGroup):
name = 'Hawkeye'
def fill(self):
self.add(HawkeyeCoveringFire, 3)
self.add(HawkeyeTrick, 1)
self.add(HawkeyeQuick, 5)
self.add(HawkeyeTeam, 5)
class HawkeyeCoveringFire(Hero):
name = 'Hawkeye: Covering Fire'
cost = 5
power = 3
tags = [Avenger, Tech]
desc = ('<Tec>: Choose one: Each other player draws a card or '
'each other player discards a card.')
def on_play(self, player):
if player.count_played(tag=Tech, ignore=self):
actions = [
bg.CustomAction('Each other player draws a card',
self.on_choose_draw, kwargs=dict(player=player)),
bg.CustomAction('Each other player discards a card',
self.on_choose_discard, kwargs=dict(player=player)),
]
self.game.choice(actions)
def on_choose_draw(self, player):
for p in self.game.players:
if p is not player:
p.draw(1)
def on_choose_discard(self, player):
for p in self.game.players:
if p is not player:
actions = []
for h in p.hand:
actions.append(action.DiscardFrom(h, p.hand))
self.game.choice(actions, player=p)
class HawkeyeTeam(Hero):
name = 'Hawkeye: Team Player'
cost = 4
power = 2
extra_power = True
tags = [Avenger, Tech]
desc = '<Avg>: P+1'
def on_play(self, player):
if player.count_played(tag=Avenger, ignore=self):
player.available_power +=1
class HawkeyeQuick(Hero):
name = 'Hawkeye: Quick Draw'
cost = 3
power = 1
tags = [Avenger, Instinct]
desc = 'Draw a card'
def on_play(self, player):
player.draw(1)
class HawkeyeTrick(Hero):
name = 'Hawkeye: Impossible Trick Shot'
cost = 7
power = 5
tags = [Avenger, Tech]
desc = ('Whenever you fight a Villain or Mastermind this turn, '
'rescue 3 Bystanders')
def on_play(self, player):
def on_fight(enemy):
player.rescue_bystander()
player.rescue_bystander()
player.rescue_bystander()
player.handlers['on_fight'].append(on_fight)
class Cyclops(HeroGroup):
name = 'Cyclops'
def fill(self):
self.add(CyclopsDetermination, 5)
self.add(CyclopsOptic, 5)
self.add(CyclopsEnergy, 3)
self.add(CyclopsUnited, 1)
class CyclopsDetermination(Hero):
name = 'Cyclops: Determination'
cost = 2
star = 3
tags = [XMen, Strength]
desc = 'To play this card, you must discard a card.'
def valid_play(self, player):
for h in player.hand:
if h is not self:
return True
return False
def on_play(self, player):
actions = []
for h in player.hand:
if h is not self:
actions.append(action.DiscardFrom(h, player.hand))
self.game.choice(actions)
class CyclopsOptic(Hero):
name = 'Cyclops: Optic Blast'
cost = 3
power = 3
tags = [XMen, Ranged]
desc = 'To play this card, you must discard a card.'
def valid_play(self, player):
for h in player.hand:
if h is not self:
return True
return False
def on_play(self, player):
actions = []
for h in player.hand:
if h is not self:
actions.append(action.DiscardFrom(h, player.hand))
self.game.choice(actions)
class CyclopsEnergy(Hero):
name = 'Cyclops: Unending Energy'
cost = 6
power = 4
tags = [XMen, Ranged]
return_from_discard = True
desc = 'If you discard this card, you may return it to your hand'
class CyclopsUnited(Hero):
name = 'Cyclops: X-Men United'
cost = 8
power = 6
extra_power = True
tags = [XMen, Ranged]
desc = '<XMn> P+2 for each other <XMn> played this turn'
def on_play(self, player):
count = player.count_played(tag=XMen, ignore=self)
player.available_power += 2 * count
class BlackWidow(HeroGroup):
name = 'Black Widow'
def fill(self):
self.add(BlackWidowCovert, 3)
self.add(BlackWidowRescue, 5)
self.add(BlackWidowMission, 5)
self.add(BlackWidowSniper, 1)
class BlackWidowCovert(Hero):
name = 'Black Widow: Covert Operation'
cost = 4
power = 0
extra_power = True
tags = [Avenger, Covert]
desc = 'P+1 for each Bystander in victory pile'
def on_play(self, player):
for c in player.victory_pile:
if isinstance(c, Bystander):
player.available_power += 1
class BlackWidowRescue(Hero):
name = 'Black Widow: Dangerous Rescue'
cost = 3
power = 2
tags = [Avenger, Covert]
desc = ('<Cov> You may KO a card from hand or discard. '
'If you do, rescue a Bystander')
def on_play(self, player):
if player.count_played(tag=Covert, ignore=self) > 0:
actions = []
for c in player.hand:
actions.append(action.KOFrom(c, player.hand))
for c in player.discard:
actions.append(action.KOFrom(c, player.discard))
choice = self.game.choice(actions, allow_do_nothing=True)
if choice is not None:
player.rescue_bystander()
class BlackWidowMission(Hero):
name = 'Black Widow: Mission Accomplished'
cost = 2
tags = [Avenger, Tech]
desc = 'Draw a card. <Tec>: Rescue a Bystander'
def on_play(self, player):
player.draw(1)
if player.count_played(tag=Tech, ignore=self) > 0:
player.rescue_bystander()
class BlackWidowSniper(Hero):
name = 'Black Widow: Silent Sniper'
cost = 7
power = 4
tags = [Avenger, Covert]
desc = 'Defeat a Villain or Mastermind that has a Bystander'
def on_play(self, player):
actions = []
for v in [self.game.mastermind] + self.game.city:
if v is not None:
for c in v.captured:
if isinstance(c, Bystander):
actions.append(bg.CustomAction(
'Defeat %s' % v.text(),
func=player.defeat,
kwargs=dict(villain=v)))
if len(actions) > 0:
self.game.choice(actions)
class Hulk(HeroGroup):
name = 'Hulk'
def fill(self):
self.add(HulkUnstoppable, 5)
self.add(HulkSmash, 1)
self.add(HulkAnger, 5)
self.add(HulkRampage, 3)
class HulkRampage(Hero):
name = 'Hulk: Crazed Rampage'
cost = 5
power = 4
tags = [Avenger, Strength]
desc = 'Each player gains a Wound'
def on_play(self, player):
for p in self.game.players:
self.game.event('Hulk wounds %s' % p.name)
p.gain_wound()
class HulkAnger(Hero):
name = 'Hulk: Growing Anger'
cost = 3
power = 2
extra_power = True
tags = [Avenger, Strength]
desc = '<Str> P+1'
def on_play(self, player):
if player.count_played(tag=Strength, ignore=self) > 0:
player.available_power += 1
class HulkSmash(Hero):
name = 'Hulk: Smash'
cost = 8
power = 5
extra_power = True
tags = [Avenger, Strength]
desc = '<Str> P+5'
def on_play(self, player):
if player.count_played(tag=Strength, ignore=self) > 0:
player.available_power += 5
class HulkUnstoppable(Hero):
name = 'Hulk: Unstoppable'
cost = 4
power = 2
tags = [Avenger, Instinct]
desc = 'You may KO a Wound from your hand or discard. If you do, P+2.'
def on_play(self, player):
actions = []
for c in player.hand:
if isinstance(c, Wound):
actions.append(action.KOFrom(c, player.hand))
break
for c in player.discard:
if isinstance(c, Wound):
actions.append(action.KOFrom(c, player.discard))
break
choice = self.game.choice(actions, allow_do_nothing=True)
if choice is not None:
player.available_power += 2
class Wolverine(HeroGroup):
name = 'Wolverine (X-Men)'
def fill(self):
self.add(WolverineSenses, 5)
self.add(WolverineHealing, 5)
self.add(WolverineSlashing, 3)
self.add(WolverineRage, 1)
class WolverineRage(Hero):
name = 'Wolverine: Berserker Rage'
cost = 8
power = 2
tags = [XMen, Instinct]
desc = 'Draw 3 cards. <Ins> You get P+1 for each extra card drawn'
extra_power = True
def on_play(self, player):
player.draw(3)
player.available_power += player.extra_draw_count
class WolverineSlashing(Hero):
name = 'Wolverine: Frenzied Slashing'
cost = 5
power = 2
tags = [XMen, Instinct]
desc = '<Ins> Draw 2 cards'
def on_play(self, player):
if player.count_played(tag=Instinct, ignore=self):
player.draw(2)
class WolverineHealing(Hero):
name = 'Wolverine: Healing Factor'
cost = 3
power = 2
tags = [XMen, Instinct]
desc = 'You may KO a Wound from hand or discard. If you do, draw a card.'
def on_play(self, player):
actions = []
for c in player.hand:
if isinstance(c, Wound):
actions.append(action.KOFrom(c, player.hand))
for c in player.discard:
if isinstance(c, Wound):
actions.append(action.KOFrom(c, player.discard))
if len(actions) > 0:
choice = self.game.choice(actions, allow_do_nothing=True)
if choice is not None:
player.draw(1)
class WolverineSenses(Hero):
name = 'Wolverine: Keen Senses'
cost = 2
power = 1
tags = [XMen, Instinct]
desc = '<Ins> Draw a card'
def on_play(self, player):
if player.count_played(tag=Instinct, ignore=self) > 0:
player.draw(1)
class CaptainAmerica(HeroGroup):
name = 'Captain America'
def fill(self):
self.add(CapTeamwork, 5)
self.add(CapBlock, 3)
self.add(CapDay, 1)
self.add(CapAssemble, 5)
class CapAssemble(Hero):
name = 'Captain America: Avengers Assemble'
cost = 3
star = 0
extra_star = True
tags = [Avenger, Instinct]
desc = 'S+1 for each color of Hero you have'
def on_play(self, player):
cards = player.hand + player.played
for tag in [Tech, Ranged, Strength, Instinct, Covert, Shield]:
for c in cards:
if tag in c.tags:
player.available_star += 1
break
class CapDay(Hero):
name = 'Captain America: A Day Unlike Any Other'
cost = 7
power = 3
extra_power = True
tags = [Avenger, Covert]
desc = '<Avg> P+3 for every other Avg played'
def on_play(self, player):
count = player.count_played(tag=Avenger, ignore=self)
player.available_power += count * 3
class CapBlock(Hero):
name = 'Captain America: Diving Block'
cost = 6
power = 4
tags = [Avenger, Tech]
desc = 'If you would gain a Wound, reveal this and draw a card instead'
def allow_wound(self, player):
choice = self.game.choice([
bg.CustomAction('Have Captain America block the Wound')
], allow_do_nothing=True)
if choice is not None:
player.draw(1)
return False
else:
return True
class CapTeamwork(Hero):
name = 'Captain America: Perfect Teamwork'
cost = 4
extra_power = True
tags = [Avenger, Strength]
desc = 'P+1 for each color of Hero you have'
def on_play(self, player):
cards = player.hand + player.played
for tag in [Tech, Ranged, Strength, Instinct, Covert, Shield]:
for c in cards:
if tag in c.tags:
player.available_power += 1
break
class Thor(HeroGroup):
name = 'Thor'
def fill(self):
self.add(ThorPower, 5)
self.add(ThorOdinson, 5)
self.add(ThorThunder, 1)
self.add(ThorLightning, 3)
class ThorLightning(Hero):
name = 'Thor: Call Lightning'
cost = 6
power = 3
extra_power = True
tags = [Avenger, Ranged]
desc = '<Rng> P+3'
def on_play(self, player):
if player.count_played(tag=Ranged, ignore=self) > 0:
player.available_power += 3
class ThorOdinson(Hero):
name = 'Thor: Odinson'
cost = 3
star = 2
extra_star = True
tags = [Avenger, Strength]
desc = '<Str> S+2'
def on_play(self, player):
if player.count_played(tag=Strength, ignore=self) > 0:
player.available_star += 2
class ThorPower(Hero):
name = 'Thor: Surge of Power'
cost = 4
star = 2
extra_star = True
tags = [Avenger, Ranged]
desc = 'If you made 8 or more S, P+3'
def on_play(self, player):
if player.available_star + player.used_star >= 8:
player.available_power += 3
class ThorThunder(Hero):
name = 'Thor: God of Thunder'
cost = 8
star = 5
extra_power = True
tags = [Avenger, Ranged]
desc = 'You can use S as P this turn'
def on_play(self, player):
player.can_use_star_as_power = True
class Rogue(HeroGroup):
name = 'Rogue'
def fill(self):
self.add(RogueBrawn, 5)
self.add(RogueEnergy, 5)
self.add(RogueSteal, 1)
self.add(RogueCopy, 3)
class RogueBrawn(Hero):
name = 'Rogue: Stolen Brawn'
cost = 4
power = 1
extra_power = True
tags = [XMen, Strength]
desc = '<Str>: P+3'
def on_play(self, player):
if player.count_played(tag=Strength, ignore=self):
player.available_power += 3
class RogueSteal(Hero):
name = 'Rogue: Steal Abilities'
cost = 8
power = 4
tags = [XMen, Strength]
desc = ('Each player discards the top card of their deck. '
'Play a copy of each card.')
def on_play(self, player):
for p in self.game.players:
cards = p.draw(1, put_in_hand=False)
if len(cards) > 0:
card = cards[0]
p.discard.append(card)
self.game.event('Rogue steals: %s' % card.text())
copied = card.copy()
try:
player.play(copied)
except bg.NoValidActionException:
self.game.event("Rogue's copy of %s skipped requirements")
class RogueCopy(Hero):
name = 'Rogue: Copy Powers'
cost = 5
tags = [XMen, Covert]
desc = ('Play this as a copy of another Hero you played this turn. '
'This card <Cov> in addition')
def valid_play(self, player):
for c in player.played:
if not hasattr(c, 'valid_play') or c.valid_play(player):
return True
return False
def on_play(self, player):
player.played.remove(self)
actions = []
for c in player.played:
if not hasattr(c, 'valid_play') or c.valid_play(player):
actions.append(bg.CustomAction(
'Copy %s' % c.text(),
func=self.on_copy,
kwargs=dict(player=player, card=c)))
self.game.choice(actions)
def on_copy(self, player, card):
copied = card.copy()
if Covert not in copied.tags:
copied.tags = [Covert] + copied.tags
copied.original = self
player.play(copied)
class RogueEnergy(Hero):
name = 'Rogue: Energy Drain'
cost = 3
star = 2
extra_star = True
tags = [XMen, Covert]
desc = '<Cov>: You may KO a card from hand or discard. If you do, S+1.'
def on_play(self, player):
if player.count_played(tag=Covert, ignore=self):
actions = []
for c in player.hand:
actions.append(action.KOFrom(c, player.hand))
for c in player.discard:
actions.append(action.KOFrom(c, player.discard))
if len(actions) > 0:
choice = self.game.choice(actions, allow_do_nothing=True)
if choice is not None:
player.available_star += 1
class Deadpool(HeroGroup):
name = 'Deadpool'
def fill(self):
self.add(DeadpoolUnkind, 1)
self.add(DeadpoolDoOver, 3)
self.add(DeadpoolOddball, 5)
self.add(DeadpoolHoldThis, 5)
class DeadpoolUnkind(Hero):
name = 'Deadpool: Random Acts of Unkindness'
cost = 7
power = 6
tags = [Instinct]
desc = ('You may gain a Wound to your hand. '
'Each player passes a card to their left.')
def on_play(self, player):
if len(self.game.wounds) > 0:
actions = [bg.CustomAction(
'Gain a Wound to your hand',
func=self.on_gain_wound,
kwargs=dict(player=player))]
self.game.choice(actions, allow_do_nothing=True)
if len(self.game.players) > 1:
all_actions = []
for i, p in enumerate(self.game.players):
p2 = self.game.players[(i + 1) % len(self.game.players)]
actions = []
for c in p.hand:
actions.append(bg.CustomAction(
'Pass %s to %s' % (c, p2.name),
func=self.on_pass,
kwargs=dict(card=c, player=p, player2=p2)))
all_actions.append(actions)
for i, p in enumerate(self.game.players):
if all_actions[i]:
self.game.choice(all_actions[i], player=p)
def on_gain_wound(self, player):
player.hand.append(self.game.wounds.pop(0))
def on_pass(self, card, player, player2):
self.game.event('%s gives %s to %s' % (player.name, card.name,
player2.name))
player.hand.remove(card)
player2.hand.append(card)
class DeadpoolDoOver(Hero):
name = 'Deadpool: Hey, can I get a Do Over?'
cost = 3
power = 2
tags = [Instinct]
desc = ('If this is the first Hero played, you may discard the rest of '
'your hand and draw four cards.')
def on_play(self, player):
if len(player.played) == 1:
actions = [bg.CustomAction(
'Get a Do Over',
func=self.on_do_over,
kwargs=dict(player=player))]
self.game.choice(actions, allow_do_nothing=True)
def on_do_over(self, player):
for c in player.hand[:]:
player.discard_from(c, player.hand)
player.draw(4)
class DeadpoolOddball(Hero):
name = 'Deadpool: Oddball'
cost = 5
power = 2
extra_power = True
tags = [Covert]
desc = ('P+1 for each other Hero played with odd C.')
def on_play(self, player):
for c in player.played:
if c is not self and (c.cost % 2) == 1:
player.available_power += 1
class DeadpoolHoldThis(Hero):
name = 'Deadpool: Here, Hold This for a Second'
cost = 3
star = 2
tags = [Tech]
desc = ('A Villain of your choice captures a Bystander.')
def on_play(self, player):
if self.game.bystanders:
actions = []
for v in self.game.city:
if v is not None:
actions.append(bg.CustomAction(
'%s captures Bystander' % v.name,
func=self.on_capture,
kwargs=dict(villain=v)))
if actions:
self.game.choice(actions)
def on_capture(self, villain):
villain.capture(self.game.bystanders.pop(0))
class EmmaFrost(HeroGroup):
name = 'Emma Frost'
def fill(self):
self.add(EmmaFrostDiamond, 1)
self.add(EmmaFrostPsychic, 3)
self.add(EmmaFrostThoughts, 5)
self.add(EmmaFrostMental, 5)
class EmmaFrostDiamond(Hero):
name = 'Emma Frost: Diamond Form'
cost = 7
power = 5
tags = [XMen, Strength]
desc = ('Whenever you defeat a Villain or Mastermind this turn, S+3.')
def on_play(self, player):
def on_fight(enemy):
player.available_star += 3
player.handlers['on_fight'].append(on_fight)
class EmmaFrostPsychic(Hero):
name = 'Emma Frost: Psychic Link'
cost = 5
power = 3
tags = [XMen, Instinct]
desc = 'Each player who reveals another <XMn> draws a card.'
def on_play(self, player):
for p in self.game.players:
if p.reveal_tag(XMen, ignore=self) is not None:
p.draw(1)
class EmmaFrostThoughts(Hero):
name = 'Emma Frost: Shadowed Thoughts'
cost = 4
power = 2
tags = [XMen, Covert]
desc = ('You may play a Villain. If you do, P+3.')
def on_play(self, player):
if self.game.villain:
actions = [bg.CustomAction(
'Play Villain',
func=self.on_play_villain,
kwargs=dict(player=player))]
self.game.choice(actions, allow_do_nothing=True)
def on_play_villain(self, player):
self.game.play_villain()
player.available_power += 3
class EmmaFrostMental(Hero):
name = 'Emma Frost: Mental Discipline'
cost = 3
star = 1
tags = [XMen, Ranged]
desc = ('Draw a card.')
def on_play(self, player):
player.draw(1)
class Gambit(HeroGroup):
name = 'Gambit'
def fill(self):
self.add(GambitJackpot, 1)
self.add(GambitCharm, 3)
self.add(GambitCardShark, 5)
self.add(GambitStackDeck, 5)
class GambitJackpot(Hero):
name = 'Gambit: High Stakes Jackpot'
cost = 7
power = 4
extra_power = True
tags = [XMen, Instinct]
desc = ("Reveal top card of deck. P+ that card's C.")
def on_play(self, player):
cards = player.reveal(1)
if len(cards) > 0:
card = cards[0]
player.available_power += card.cost
player.stack.insert(0, card)
class GambitCharm(Hero):
name = 'Gambit: Hypnotic Charm'
cost = 3
star = 2
tags = [XMen, Instinct]
desc = ("Reveal top card of deck. Discard it or put it back."
" <Ins>: do the same to each other player's deck.")
def on_play(self, player):
cards = player.reveal(1)
if cards:
self.game.choice([action.DiscardFrom(cards[0], cards),
action.ReturnFrom(cards[0], cards)])
if player.count_played(tag=Instinct, ignore=self):
for p in self.game.players:
if p is not player:
cards = p.reveal(1)
if cards:
self.game.choice([action.DiscardFrom(cards[0],
cards, p),
action.ReturnFrom(cards[0],
cards, p)])
class GambitCardShark(Hero):
name = 'Gambit: Card Shark'
cost = 4
power = 2
tags = [XMen, Ranged]
desc = ("Reveal top card of deck. If it is <XMn>, draw it.")
def on_play(self, player):
cards = player.reveal(1)
if cards:
player.return_to_stack(cards[0])
if XMen in cards[0].tags:
player.draw(1)
class GambitStackDeck(Hero):
name = 'Gambit: Stack the Deck'
cost = 2
tags = [XMen, Covert]
desc = ("Draw 2 cards. Put a card from your hand on top of the deck.")
def on_play(self, player):
player.draw(2)
actions = []
for c in player.hand:
actions.append(action.ReturnFrom(c, player.hand))
if actions:
self.game.choice(actions)
class NickFury(HeroGroup):
name = 'Nick Fury'
def fill(self):
self.add(NickFuryPure, 1)
self.add(NickFuryCommander, 3)
self.add(NickFuryPromotion, 5)
self.add(NickFuryWeaponry, 5)
class NickFuryPure(Hero):
name = 'Nick Fury: Pure Fury'
cost = 8
tags = [Shield, Tech]
desc = ("Defeat any Villain or Mastermind whose P is less than the"
" number of SHIELD Heroes in the KO pile.")
def on_play(self, player):
count = 0
for c in self.game.ko:
if Shield in c.tags:
count += 1
actions = []
for v in self.game.city:
if v is not None and v.power < count:
actions.append(bg.CustomAction('Defeat %s' % v.text(),
func=player.defeat,
kwargs=dict(villain=v)))
if self.game.mastermind.power < count:
v = self.game.mastermind
actions.append(bg.CustomAction('Defeat %s' % v.text(),
func=player.defeat,
kwargs=dict(villain=v)))
if len(actions) > 0:
self.game.choice(actions)
else:
self.game.event('No Villains with P<%d' % count)
class NickFuryCommander(Hero):
name = 'Nick Fury: Legendary Commander'
cost = 6
power = 1
extra_power = True
tags = [Shield, Strength]
desc = ("P+1 for each other <Shd> played this turn.")
def on_play(self, player):
count = player.count_played(Shield, ignore=self)
player.available_power += count
class NickFuryPromotion(Hero):
name = 'Nick Fury: Battlefield Promotion'
cost = 4
tags = [Shield, Covert]
desc = ("You may KO a <Shd> Hero from hand or discard pile. "
"If you do, gain a SHIELD Officer to your hand")
def on_play(self, player):
actions = []
for c in player.hand:
if Shield in c.tags:
actions.append(action.KOFrom(c, player.hand))
for c in player.discard:
if Shield in c.tags:
actions.append(action.KOFrom(c, player.discard))
choice = self.game.choice(actions, allow_do_nothing=True)
if choice is not None and self.game.officers:
player.hand.append(self.game.officers.pop())
class NickFuryWeaponry(Hero):
name = 'Nick Fury: High Tech Weaponry'
cost = 3
power = 2
extra_power = True
tags = [Shield, Tech]
desc = ("<Tec>: P+1")
def on_play(self, player):
if player.count_played(Tech, ignore=self):
player.available_power += 1
class Storm(HeroGroup):
name = 'Storm'
def fill(self):
self.add(StormTidalWave, 1)
self.add(StormCyclone, 3)
self.add(StormLightning, 5)
self.add(StormGathering, 5)
class StormTidalWave(Hero):
name = 'Storm: Tidal Wave'
cost = 7
power = 5
tags = [XMen, Ranged]
desc = ("Villains on Bridge get P-2. <Rng>: Mastermind gets P-2.")
def on_play(self, player):
adjust_bridge = AdjustPower(
items=lambda game: [game.city[0]],
amount=-2)
self.game.add_turn_handler('on_choice', adjust_bridge)
if player.count_played(Ranged, ignore=self):
adjust_mastermind = AdjustPower(
items=lambda game: [game.mastermind],
amount=-2)
self.game.add_turn_handler('on_choice', adjust_mastermind)
class StormCyclone(Hero):
name = 'Storm: Spinning Cyclone'
cost = 6
power = 4
tags = [XMen, Covert]
desc = ("You may move Villain to new space. Rescue "
"Bystanders captured by Villain. If space full, swap.")
def on_play(self, player):
actions = []
for v in self.game.city:
if v is not None:
actions.append(bg.CustomAction(
'Move %s' % v.text(),
func=self.on_move,
kwargs=dict(villain=v, player=player)))
if actions:
self.game.choice(actions, allow_do_nothing=True)
def on_move(self, villain, player):
actions = []
for i in range(5):
if self.game.city[i] is not villain:
actions.append(bg.CustomAction(
'Move %s to %s' % (villain.name, self.game.city_names[i]),
func=self.on_move_to,
kwargs=dict(villain=villain, target=i, player=player)))
self.game.choice(actions)
def on_move_to(self, villain, target, player):
player.rescue_bystander(villain)
index = self.game.city.index(villain)
self.game.city[index] = self.game.city[target]
self.game.city[target] = villain
class StormLightning(Hero):
name = 'Storm: Lightning Bolt'
cost = 4
power = 2
tags = [XMen, Ranged]
desc = ("Villains on Rooftops get P-2.")
def on_play(self, player):
adjust_roof = AdjustPower(
items=lambda game: [game.city[2]],
amount=-2)
self.game.add_turn_handler('on_choice', adjust_roof)
class StormGathering(Hero):
name = 'Storm: Gathering Stormclouds'
cost = 3
star = 2
tags = [XMen, Ranged]
desc = ("<Rng>: Draw a card")
def on_play(self, player):
if player.count_played(Ranged, ignore=self):
player.draw(1)
class Angel(HeroGroup):
name = 'Angel'
def fill(self):
self.add(AngelStrengthOfSpirit, 1)
self.add(AngelDropOffAFriend, 3)
self.add(AngelDivingCatch, 5)
self.add(AngelHighSpeedChase, 5)
class AngelStrengthOfSpirit(Hero):
name = 'Angel: Strength of Spirit'
cost = 7
power = 4
tags = [XMen, Strength]
desc = ("Discard any number of cards. Draw that many cards.")
def on_play(self, player):
count = 0
original_hand = list(player.hand)
while True:
actions = []
for h in player.hand:
#TODO: this card should finish before discarding sideeffects
# trigger (this holds for all cards and may require lots
# of tweaking throughout the code!)
if h in original_hand:
actions.append(action.DiscardFrom(h, player.hand))
if len(actions) == 0:
break
choice = self.game.choice(actions, player=player,
allow_do_nothing=True)
if choice is None:
break
else:
original_hand.remove(choice.card)
count += 1
player.draw(count)
class AngelDropOffAFriend(Hero):
name = 'Angel: Drop Off a Friend'
cost = 5
power = 2
extra_power = True
tags = [XMen, Instinct]
desc = ("You may discard a card. P+ that card's cost.")
def on_play(self, player):
if len(player.hand) > 0:
actions = []
for h in player.hand:
actions.append(action.DiscardFrom(h, player.hand,
show_cost=True))
choice = self.game.choice(actions, player=player,
allow_do_nothing=True)
if choice is not None:
player.available_power += choice.card.cost
class AngelDivingCatch(Hero):
name = 'Angel: Diving Catch'
cost = 4
star = 2
tags = [XMen, Strength]
desc = ("If this card discarded, rescue Bystander and draw 2 cards.")
def on_discard(self, player):
player.rescue_bystander()
player.draw(2)
class AngelHighSpeedChase(Hero):
name = 'Angel: High Speed Chase'
cost = 3
tags = [XMen, Covert]
desc = ("Draw 2 cards, then discard 1 card.")
def on_play(self, player):
player.draw(2)
if len(player.hand) > 0:
actions = []
for h in player.hand:
actions.append(action.DiscardFrom(h, player.hand))
self.game.choice(actions, player=player)
class Bishop(HeroGroup):
name = 'Bishop'
def fill(self):
self.add(BishopFirepowerFromTheFuture, 1)
self.add(BishopConcussiveBlast, 3)
self.add(BishopAbsorbEnergies, 5)
self.add(BishopWhateverTheCost, 5)
class BishopFirepowerFromTheFuture(Hero):
name = 'Bishop: Firepower From the Future'
cost = 7
power = 4
extra_power = True
tags = [XMen, Tech]
desc = ("Discard top 4 cards from deck; gain P+ equal to cards' P. "
"<Xmn> KO any of those 4 cards.")
def on_play(self, player):
cards = player.draw(4, put_in_hand=False,
count_draw=False, event_message=False)
for c in cards:
player.available_power += c.power
if player.count_played(XMen, ignore=self):
while len(cards) > 0:
actions = []
for c in cards:
actions.append(action.KOFrom(c, cards))
choice = self.game.choice(actions, player=player,
allow_do_nothing=True)
if choice is None:
break
for c in cards:
player.discard_from(c, cards)
class BishopConcussiveBlast(Hero):
name = 'Bishop: Concussive Blast'
cost = 5
power = 3
extra_power = True
tags = [XMen, Ranged]
desc = ("<Rng><Rng> P+3")
def on_play(self, player):
if player.count_played(Ranged, ignore=self) >= 2:
player.available_power += 3
class BishopAbsorbEnergies(Hero):
name = 'Bishop: Absorb Energies'
cost = 3
power = 2
extra_star = True
tags = [XMen, Covert]
desc = ("Whenever card you own is KO'd this turn, S+2")
def on_play(self, player):
def on_ko(card):
player.available_star += 2
player.handlers['on_ko'].append(on_ko)
class BishopWhateverTheCost(Hero):
name = 'Bishop: Whatever the Cost'
cost = 2
tags = [XMen, Ranged]
desc = ("Draw a card. <Cov>: You may KO a card from hand or discard pile")
def on_play(self, player):
player.draw(1)
if player.count_played(Covert, ignore=self):
actions = []
for c in player.hand:
actions.append(action.KOFrom(c, player.hand))
for c in player.discard:
actions.append(action.KOFrom(c, player.discard))
choice = self.game.choice(actions, player=player,
allow_do_nothing=True)
| gpl-2.0 | 5,499,505,911,280,402,000 | 30.63751 | 80 | 0.564511 | false |
harayz/raspberry_pwn | src/pentest/sqlmap/thirdparty/chardet/mbcharsetprober.py | 20 | 3155 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from constants import eStart, eError, eItsMe
from charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = ['\x00', '\x00']
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = ['\x00', '\x00']
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in xrange(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mDistributionAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
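# Hedged usage note (editor's addition, not part of the upstream chardet code):
# MultiByteCharSetProber is an abstract base -- concrete probers supply the
# coding state machine and distribution analyzer. Callers normally go through
# the package-level helper instead, along the lines of:
# import chardet
# result = chardet.detect('\xe4\xbd\xa0\xe5\xa5\xbd') # assumed UTF-8 sample bytes
# print result['encoding'], result['confidence']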
| gpl-3.0 | -4,295,514,968,578,284,500 | 37.47561 | 108 | 0.619017 | false |
robcarver17/pysystemtrade | syscore/interactively_run_functions.py | 1 | 4195 | import inspect
NO_DEFAULT = object()
NO_TYPE_PROVIDED = object()
NO_VALID_ARGUMENT_PASSED = object()
def parameter_default(parameter_signature):
default = parameter_signature.default
if default is inspect._empty:
default = NO_DEFAULT
return default
def has_default(parameter_signature):
return parameter_default(parameter_signature) is not NO_DEFAULT
def parameter_type(parameter_signature):
ptype = parameter_signature.annotation
if ptype is inspect._empty:
# get from default
if has_default(parameter_signature):
default_value = parameter_default(parameter_signature)
ptype = type(default_value)
else:
# give up
return NO_TYPE_PROVIDED
return ptype.__name__
def has_type(parameter_signature):
return parameter_type(parameter_signature) is not NO_TYPE_PROVIDED
def input_and_type_cast_argument(argname, parameter_signature):
"""
Interactively get a value for a parameter, considering any type casting required or defaults
:param argname: str
:param parameter_signature: results of doing inspect.signature(func)['parameter name']
:return: argument value
"""
default_provided = has_default(parameter_signature)
needs_casting = has_type(parameter_signature)
if default_provided:
argdefault = parameter_default(parameter_signature)
if needs_casting:
type_to_cast_to = parameter_type(parameter_signature)
# Should never return this unless something gone horribly wrong
arg_value = NO_VALID_ARGUMENT_PASSED
while arg_value is NO_VALID_ARGUMENT_PASSED:
if default_provided:
default_string = " (default: '%s')" % str(argdefault)
else:
default_string = ""
if needs_casting:
type_string = " (type: %s)" % str(type_to_cast_to)
else:
type_string = ""
arg_value = input(
"Argument %s %s %s?" %
(argname, default_string, type_string))
if arg_value == "": # just pressed carriage return...
if default_provided:
arg_value = argdefault
break
else:
print(
"No default provided for %s - need a value. Please type something!" %
argname)
arg_value = NO_VALID_ARGUMENT_PASSED
else:
# A value has been typed - check if needs type casting
if needs_casting:
try:
# Cast the type
# this might not work
type_func = eval("%s" % type_to_cast_to)
arg_value = type_func(arg_value)
break
except BaseException:
print(
"\nCouldn't cast value %s to type %s\n"
% (arg_value, type_to_cast_to)
)
arg_value = NO_VALID_ARGUMENT_PASSED
else:
# no type casting required
pass
return arg_value
def fill_args_and_run_func(func, full_funcname):
"""
Prints the docstring of func, then asks for all of its arguments with defaults
Optionally casts to type, if any argument name is an entry in the dict type_casting
"""
# print doc string
print("\n")
print(full_funcname + ":")
print(inspect.getdoc(func))
print("\n")
func_arguments = inspect.signature(func).parameters
print("\nArguments:")
print(list(func_arguments.keys()))
print("\n")
args = []
kwargs = dict()
for (argname, parameter_signature) in func_arguments.items():
arg_value = input_and_type_cast_argument(argname, parameter_signature)
if arg_value is NO_VALID_ARGUMENT_PASSED:
raise Exception(
"Invalid argument passed - should not happen - check function 'input_and_type_cast_argument' logic"
)
is_kwarg = has_default(parameter_signature)
if is_kwarg:
kwargs[argname] = arg_value
else:
args.append(arg_value)
return args, kwargs
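# Hedged usage sketch (editor's addition, not part of the original module). The
# function `example_scale` below is hypothetical; only fill_args_and_run_func
# comes from this file. Running this would print the docstring, prompt for
# `price` and `multiplier` (cast to float), and return the collected arguments:
#
# def example_scale(price: float, multiplier: float = 2.0) -> float:
# """Return price scaled by multiplier."""
# return price * multiplier
#
# args, kwargs = fill_args_and_run_func(example_scale, "example_scale")
# result = example_scale(*args, **kwargs)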
| gpl-3.0 | -7,863,437,943,170,438,000 | 29.620438 | 115 | 0.587366 | false |
wkubiak/grpc | src/python/src/grpc/framework/face/testing/digest.py | 17 | 15849 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for making a service.TestService more amenable to use in tests."""
import collections
import threading
# testing_control, interfaces, and testing_service are referenced from
# specification in this module.
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.face import exceptions
from grpc.framework.face import interfaces as face_interfaces
from grpc.framework.face.testing import control as testing_control # pylint: disable=unused-import
from grpc.framework.face.testing import interfaces # pylint: disable=unused-import
from grpc.framework.face.testing import service as testing_service # pylint: disable=unused-import
from grpc.framework.foundation import stream
from grpc.framework.foundation import stream_util
_IDENTITY = lambda x: x
class TestServiceDigest(
collections.namedtuple(
'TestServiceDigest',
['name',
'methods',
'inline_method_implementations',
'event_method_implementations',
'multi_method_implementation',
'unary_unary_messages_sequences',
'unary_stream_messages_sequences',
'stream_unary_messages_sequences',
'stream_stream_messages_sequences'])):
"""A transformation of a service.TestService.
Attributes:
name: The RPC service name to be used in the test.
methods: A sequence of interfaces.Method objects describing the RPC
methods that will be called during the test.
inline_method_implementations: A dict from RPC method name to
face_interfaces.MethodImplementation object to be used in tests of
in-line calls to behaviors under test.
event_method_implementations: A dict from RPC method name to
face_interfaces.MethodImplementation object to be used in tests of
event-driven calls to behaviors under test.
multi_method_implementation: A face_interfaces.MultiMethodImplementation to
be used in tests of generic calls to behaviors under test.
unary_unary_messages_sequences: A dict from method name to sequence of
service.UnaryUnaryTestMessages objects to be used to test the method
with the given name.
unary_stream_messages_sequences: A dict from method name to sequence of
service.UnaryStreamTestMessages objects to be used to test the method
with the given name.
stream_unary_messages_sequences: A dict from method name to sequence of
service.StreamUnaryTestMessages objects to be used to test the method
with the given name.
stream_stream_messages_sequences: A dict from method name to sequence of
service.StreamStreamTestMessages objects to be used to test the
method with the given name.
serialization: A serial.Serialization object describing serialization
behaviors for all the RPC methods.
"""
class _BufferingConsumer(stream.Consumer):
"""A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
def __init__(self):
self.consumed = []
self.terminated = False
def consume(self, value):
self.consumed.append(value)
def terminate(self):
self.terminated = True
def consume_and_terminate(self, value):
self.consumed.append(value)
self.terminated = True
class _InlineUnaryUnaryMethod(face_interfaces.MethodImplementation):
def __init__(self, unary_unary_test_method, control):
self._test_method = unary_unary_test_method
self._control = control
self.cardinality = cardinality.Cardinality.UNARY_UNARY
self.style = style.Service.INLINE
def unary_unary_inline(self, request, context):
response_list = []
self._test_method.service(
request, response_list.append, context, self._control)
return response_list.pop(0)
class _EventUnaryUnaryMethod(face_interfaces.MethodImplementation):
def __init__(self, unary_unary_test_method, control, pool):
self._test_method = unary_unary_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.UNARY_UNARY
self.style = style.Service.EVENT
def unary_unary_event(self, request, response_callback, context):
if self._pool is None:
self._test_method.service(
request, response_callback, context, self._control)
else:
self._pool.submit(
self._test_method.service, request, response_callback, context,
self._control)
class _InlineUnaryStreamMethod(face_interfaces.MethodImplementation):
def __init__(self, unary_stream_test_method, control):
self._test_method = unary_stream_test_method
self._control = control
self.cardinality = cardinality.Cardinality.UNARY_STREAM
self.style = style.Service.INLINE
def unary_stream_inline(self, request, context):
response_consumer = _BufferingConsumer()
self._test_method.service(
request, response_consumer, context, self._control)
for response in response_consumer.consumed:
yield response
class _EventUnaryStreamMethod(face_interfaces.MethodImplementation):
def __init__(self, unary_stream_test_method, control, pool):
self._test_method = unary_stream_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.UNARY_STREAM
self.style = style.Service.EVENT
def unary_stream_event(self, request, response_consumer, context):
if self._pool is None:
self._test_method.service(
request, response_consumer, context, self._control)
else:
self._pool.submit(
self._test_method.service, request, response_consumer, context,
self._control)
class _InlineStreamUnaryMethod(face_interfaces.MethodImplementation):
def __init__(self, stream_unary_test_method, control):
self._test_method = stream_unary_test_method
self._control = control
self.cardinality = cardinality.Cardinality.STREAM_UNARY
self.style = style.Service.INLINE
def stream_unary_inline(self, request_iterator, context):
response_list = []
request_consumer = self._test_method.service(
response_list.append, context, self._control)
for request in request_iterator:
request_consumer.consume(request)
request_consumer.terminate()
return response_list.pop(0)
class _EventStreamUnaryMethod(face_interfaces.MethodImplementation):
def __init__(self, stream_unary_test_method, control, pool):
self._test_method = stream_unary_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.STREAM_UNARY
self.style = style.Service.EVENT
def stream_unary_event(self, response_callback, context):
request_consumer = self._test_method.service(
response_callback, context, self._control)
if self._pool is None:
return request_consumer
else:
return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
class _InlineStreamStreamMethod(face_interfaces.MethodImplementation):
def __init__(self, stream_stream_test_method, control):
self._test_method = stream_stream_test_method
self._control = control
self.cardinality = cardinality.Cardinality.STREAM_STREAM
self.style = style.Service.INLINE
def stream_stream_inline(self, request_iterator, context):
response_consumer = _BufferingConsumer()
request_consumer = self._test_method.service(
response_consumer, context, self._control)
for request in request_iterator:
request_consumer.consume(request)
while response_consumer.consumed:
yield response_consumer.consumed.pop(0)
response_consumer.terminate()
class _EventStreamStreamMethod(face_interfaces.MethodImplementation):
def __init__(self, stream_stream_test_method, control, pool):
self._test_method = stream_stream_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.STREAM_STREAM
self.style = style.Service.EVENT
def stream_stream_event(self, response_consumer, context):
request_consumer = self._test_method.service(
response_consumer, context, self._control)
if self._pool is None:
return request_consumer
else:
return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
class _UnaryConsumer(stream.Consumer):
"""A Consumer that only allows consumption of exactly one value."""
def __init__(self, action):
self._lock = threading.Lock()
self._action = action
self._consumed = False
self._terminated = False
def consume(self, value):
with self._lock:
if self._consumed:
raise ValueError('Unary consumer already consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._consumed = True
self._action(value)
def terminate(self):
with self._lock:
if not self._consumed:
raise ValueError('Unary consumer hasn\'t yet consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._terminated = True
def consume_and_terminate(self, value):
with self._lock:
if self._consumed:
raise ValueError('Unary consumer already consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._consumed = True
self._terminated = True
self._action(value)
class _UnaryUnaryAdaptation(object):
def __init__(self, unary_unary_test_method):
self._method = unary_unary_test_method
def service(self, response_consumer, context, control):
def action(request):
self._method.service(
request, response_consumer.consume_and_terminate, context, control)
return _UnaryConsumer(action)
class _UnaryStreamAdaptation(object):
def __init__(self, unary_stream_test_method):
self._method = unary_stream_test_method
def service(self, response_consumer, context, control):
def action(request):
self._method.service(request, response_consumer, context, control)
return _UnaryConsumer(action)
class _StreamUnaryAdaptation(object):
def __init__(self, stream_unary_test_method):
self._method = stream_unary_test_method
def service(self, response_consumer, context, control):
return self._method.service(
response_consumer.consume_and_terminate, context, control)
class _MultiMethodImplementation(face_interfaces.MultiMethodImplementation):
def __init__(self, methods, control, pool):
self._methods = methods
self._control = control
self._pool = pool
def service(self, name, response_consumer, context):
method = self._methods.get(name, None)
if method is None:
raise exceptions.NoSuchMethodError(name)
elif self._pool is None:
return method(response_consumer, context, self._control)
else:
request_consumer = method(response_consumer, context, self._control)
return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
class _Assembly(
collections.namedtuple(
'_Assembly',
['methods', 'inlines', 'events', 'adaptations', 'messages'])):
"""An intermediate structure created when creating a TestServiceDigest."""
def _assemble(
scenarios, names, inline_method_constructor, event_method_constructor,
adapter, control, pool):
"""Creates an _Assembly from the given scenarios."""
methods = []
inlines = {}
events = {}
adaptations = {}
messages = {}
for name, scenario in scenarios.iteritems():
if name in names:
raise ValueError('Repeated name "%s"!' % name)
test_method = scenario[0]
inline_method = inline_method_constructor(test_method, control)
event_method = event_method_constructor(test_method, control, pool)
adaptation = adapter(test_method)
methods.append(test_method)
inlines[name] = inline_method
events[name] = event_method
adaptations[name] = adaptation
messages[name] = scenario[1]
return _Assembly(methods, inlines, events, adaptations, messages)
def digest(service, control, pool):
"""Creates a TestServiceDigest from a TestService.
Args:
service: A testing_service.TestService.
control: A testing_control.Control.
pool: If RPC methods should be serviced in a separate thread, a thread pool.
None if RPC methods should be serviced in the thread belonging to the
run-time that calls for their service.
Returns:
A TestServiceDigest synthesized from the given service.TestService.
"""
names = set()
unary_unary = _assemble(
service.unary_unary_scenarios(), names, _InlineUnaryUnaryMethod,
_EventUnaryUnaryMethod, _UnaryUnaryAdaptation, control, pool)
names.update(set(unary_unary.inlines))
unary_stream = _assemble(
service.unary_stream_scenarios(), names, _InlineUnaryStreamMethod,
_EventUnaryStreamMethod, _UnaryStreamAdaptation, control, pool)
names.update(set(unary_stream.inlines))
stream_unary = _assemble(
service.stream_unary_scenarios(), names, _InlineStreamUnaryMethod,
_EventStreamUnaryMethod, _StreamUnaryAdaptation, control, pool)
names.update(set(stream_unary.inlines))
stream_stream = _assemble(
service.stream_stream_scenarios(), names, _InlineStreamStreamMethod,
_EventStreamStreamMethod, _IDENTITY, control, pool)
names.update(set(stream_stream.inlines))
methods = list(unary_unary.methods)
methods.extend(unary_stream.methods)
methods.extend(stream_unary.methods)
methods.extend(stream_stream.methods)
adaptations = dict(unary_unary.adaptations)
adaptations.update(unary_stream.adaptations)
adaptations.update(stream_unary.adaptations)
adaptations.update(stream_stream.adaptations)
inlines = dict(unary_unary.inlines)
inlines.update(unary_stream.inlines)
inlines.update(stream_unary.inlines)
inlines.update(stream_stream.inlines)
events = dict(unary_unary.events)
events.update(unary_stream.events)
events.update(stream_unary.events)
events.update(stream_stream.events)
return TestServiceDigest(
service.name(),
methods,
inlines,
events,
_MultiMethodImplementation(adaptations, control, pool),
unary_unary.messages,
unary_stream.messages,
stream_unary.messages,
stream_stream.messages)
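# Hedged usage sketch (editor's addition): `my_service` and `my_control` are
# assumed to be a testing_service.TestService and a testing_control.Control
# built elsewhere in the test suite; pool may be None to service RPCs inline.
#
# service_digest = digest(my_service, my_control, None)
# print service_digest.name, len(service_digest.methods)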
| bsd-3-clause | -3,213,750,168,973,987,000 | 34.22 | 99 | 0.723768 | false |
zozo123/buildbot | master/buildbot/test/unit/test_status_build.py | 1 | 3276 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot import interfaces
from buildbot import util
from buildbot.status import build
from buildbot.test.fake import fakemaster
from twisted.trial import unittest
from zope.interface import implements
class FakeBuilderStatus:
implements(interfaces.IBuilderStatus)
class FakeSource(util.ComparableMixin):
compare_attrs = ('codebase', 'revision')
def __init__(self, codebase, revision):
self.codebase = codebase
self.revision = revision
def clone(self):
return FakeSource(self.codebase, self.revision)
def getAbsoluteSourceStamp(self, revision):
return FakeSource(self.codebase, revision)
def __repr__(self):
# note: this won't work for VC systems with huge 'revision' strings
text = []
if self.codebase:
text.append("(%s)" % self.codebase)
if self.revision is None:
return text + ["latest"]
text.append(str(self.revision))
return "FakeSource(%s)" % (', '.join(text),)
class TestBuildProperties(unittest.TestCase):
"""
Test that a BuildStatus has the necessary L{IProperties} methods and that
they delegate to its C{properties} attribute properly - so really just a
test of the L{IProperties} adapter.
"""
BUILD_NUMBER = 33
def setUp(self):
self.builder_status = FakeBuilderStatus()
self.master = fakemaster.make_master()
self.build_status = build.BuildStatus(self.builder_status, self.master,
self.BUILD_NUMBER)
self.build_status.properties = mock.Mock()
def test_getProperty(self):
self.build_status.getProperty('x')
self.build_status.properties.getProperty.assert_called_with('x', None)
def test_getProperty_default(self):
self.build_status.getProperty('x', 'nox')
self.build_status.properties.getProperty.assert_called_with('x', 'nox')
def test_setProperty(self):
self.build_status.setProperty('n', 'v', 's')
self.build_status.properties.setProperty.assert_called_with('n', 'v',
's', runtime=True)
def test_hasProperty(self):
self.build_status.properties.hasProperty.return_value = True
self.assertTrue(self.build_status.hasProperty('p'))
self.build_status.properties.hasProperty.assert_called_with('p')
def test_render(self):
self.build_status.render("xyz")
self.build_status.properties.render.assert_called_with("xyz")
| gpl-3.0 | -514,493,351,206,631,100 | 35 | 86 | 0.674603 | false |
longman694/youtube-dl | youtube_dl/extractor/aljazeera.py | 47 | 1490 | from __future__ import unicode_literals
from .common import InfoExtractor
class AlJazeeraIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?:programmes|video)/.*?/(?P<id>[^/]+)\.html'
_TESTS = [{
'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
'info_dict': {
'id': '3792260579001',
'ext': 'mp4',
'title': 'The Slum - Episode 1: Deliverance',
'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
'uploader_id': '665003303001',
'timestamp': 1411116829,
'upload_date': '20140919',
},
'add_ie': ['BrightcoveNew'],
'skip': 'Not accessible from Travis CI server',
}, {
'url': 'http://www.aljazeera.com/video/news/2017/05/sierra-leone-709-carat-diamond-auctioned-170511100111930.html',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/665003303001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
program_name = self._match_id(url)
webpage = self._download_webpage(url, program_name)
brightcove_id = self._search_regex(
r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
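# Hedged usage note (editor's addition): extractors are not invoked directly;
# they are selected by URL when running the command-line tool, e.g.:
# youtube-dl "http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html"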
| unlicense | 6,753,880,729,555,490,000 | 44.151515 | 145 | 0.620805 | false |
jeffwdoak/ElectronicDOS | ElectronicDOS/electronicdos.py | 1 | 29089 | #!/usr/bin/env python
# electronicdos.py v0.7 03-06-2014 Jeff Doak [email protected]
# Changelog:
# v0.6 - Added method to write multiple densities of states to the same file.
# This method is untested as of 11/5/2013
# v0.5 - Added various l-, m-, and s-summation methods.
# - Added non-collinear calculation flag. (Methods do not currently handle
# non-collinear calculations, however.)
# v0.4 - Cleaned up sum_site_dos method.
# v0.3 - Adding to method sum_site_dos to include summing over several sets of
# atoms concurrently without needing to call the method multiple times.
# v0.1 - Class name is the same as a class I use to integrate a DOS and
# calculate finite temperature properties. I should either combine them
# at some point, or rename one of the classes.
import numpy as np
import sys, subprocess, re
import unitcell as uc
class ElectronicDOS:
"""
Class to store and perform operations on an electronic density of states,
as calculated by VASP. ElectronicDOS reads in a VASP-formatted DOSCAR file,
including site- and lm-decomposed densities of states. The resulting
densities of states can be summed over different sites and orbitals.
Instance attributes of ElectronicDOS:
- dosfile - name of DOSCAR-like file. Defaults to DOSCAR.
- outfile - name of OUTCAR-like file. Defaults to OUTCAR.
- posfile - name of POSCAR-like file. Defaults to POSCAR.
- ispin - flag to specify if DOS is spin-polarized (0 - not SP, 1 - SP).
- lorbit - flag to specify LORBIT setting of DOS file.
- rwigs - flag to specify whether RWIGS was set in calculation.
- e_min - minimum energy in DOSCAR.
- e_max - maximum energy in DOSCAR.
- efermi - Fermi energy as calculated by VASP.
- n_atoms - number of atoms in unit cell of calculation.
- n_dos - number of energy points in electronic DOS.
- energy - numpy array containing list of energies at which DOS is
evaluated.
- tot_dos - numpy array containing total electronic DOS. First column of
array contains total DOS if not spin-polarized or spin-up DOS
if spin-polarized. Second column of array contains zeros if not
spin-polarized or -1*spin-down DOS if spin-polarized.
- site_dos - numpy array containing atomic site- and lm-projected DOS.
Array is 3-dimensional. First index corresponds to the atom
number, second index corresponds to energy of DOS, and third
index corresponds to l, m, and spin DOS. If not
spin-polarized, last index goes in order l-m,...,l,...,l+m.
If spin-polarized, last index goes: l-m+up,l-m-dn,...,l+up,
l-dn,...,l+m+up,l+m-dn.
"""
def __init__(self,dosfile="DOSCAR",outfile="OUTCAR",posfile="POSCAR"):
self.dosfile = dosfile
self.outfile = outfile
self.posfile = posfile
self.read_outcar()
self.read_doscar()
self.unit_cell = uc.UnitCell(self.posfile)
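# Hedged usage sketch (editor's addition): the file names below are the VASP
# defaults assumed by __init__, and the calculation is assumed collinear.
#
# dos = ElectronicDOS("DOSCAR", "OUTCAR", "POSCAR")
# dos.shift_energy(-dos.e_fermi) # put the Fermi level at 0 eV
# per_atom = dos.sum_lms_dos() # (n_atoms, n_dos) array
# gaps = dos.get_band_gaps(per_atom, tol=1e-3)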
def read_outcar(self):
"""
Reads DOS parameters from OUTCAR file.
"""
# Default values of OUTCAR parameters:
self.ispin = 0
self.lorbit = 0
self.rwigs = 0
self.noncoll = 0
try:
outcar = open(self.outfile,'r')
except IOError:
print "Error reading OUTCAR file."
print "Default values for input settings will be used."
return
file = outcar.read()
outcar.close()
# Set ispin flag
spin_str = r"ISPIN *= *([0-9]*)"
spin_reg = re.compile(spin_str)
if spin_reg.findall(file)[0] == '1':
self.ispin = 0
else:
self.ispin = 1
# Set lorbit flag
orbit_str = r"LORBIT *= *([0-9]*)"
orbit_reg = re.compile(orbit_str)
self.lorbit = int(orbit_reg.findall(file)[0])
# Set rwigs flag
rwigs_str = r"RWIGS *= *[-0-9 .]*"
rwigs_reg = re.compile(rwigs_str)
temp = rwigs_reg.findall(file)[-1].split()
temp.pop(0); temp.pop(0)
for i in range(len(temp)):
if temp[i] != '-1.00': # Default RWIGS is a string of -1.00
self.rwigs = 1
break
# Set noncoll flag
noncoll_str = r"LNONCOLLINEAR *= *([FT])"
noncoll_reg = re.compile(noncoll_str)
if noncoll_reg.findall(file)[0] == "F":
self.noncoll = 0
else:
self.noncoll = 1
return
def read_doscar(self):
"""
Reads in a doscar file to grab the density of states as a function of
energy.
"""
try:
input_ = open(self.dosfile,'r')
except IOError:
print "Error reading "+dosfile+" file."
print "Program will now exit."
sys.exit(1)
# Read then discard header information
self.n_atoms = int(input_.readline().split()[0])
for i in range(4):
input_.readline()
# Read in Fermi Energy
line = input_.readline().split()
self.e_max = float(line[0])
self.e_min = float(line[1])
self.n_dos = int(line[2])
self.e_fermi = float(line[3])
# Read in total electronic DOS
energy = []; tot_dos = []
for i in range(self.n_dos):
tot_dos.append([])
line = input_.readline().split()
energy.append(float(line[0]))
if self.ispin == 0: # non-spin polarized or non-collinear calc
tot_dos[i].append(float(line[1])) # DOS includes spin up and down
#tot_dos[i].append(0.0)
else: # spin-polarized calculation
tot_dos[i].append(float(line[1]))
tot_dos[i].append(-1.*float(line[2]))
self.energy = np.array(energy)
#self.tot_dos = np.array(tot_dos)/float(self.n_atoms)
self.tot_dos = np.array(tot_dos)
# Read in site-projected electronic DOS.
if (self.lorbit >= 10) or (self.rwigs == 1 and self.lorbit < 10):
site_dos = []
# Loop over each atom in the calculation
for i in range(self.n_atoms):
site_dos.append([])
input_.readline() # Discard site-projected header line
for j in range(len(self.energy)):
site_dos[i].append([])
line = input_.readline().split()
for k in range(1,len(line)):
if self.ispin == 0:
site_dos[i][j].append(float(line[k]))
else:
site_dos[i][j].append((-1.)**(k-1)*float(line[k]))
self.site_dos = np.array(site_dos)
def sum_site_dos(self,list_,dos=None):
"""
Sums site-projected DOS over a list of atoms. If list_ is a 2-d list,
each set of atoms will be summed over seperately, and the returned array
will have the same dimensionality as the input dos. If list_ is a 1-d
list, the returned array will have one less dimension than the input
dos.
"""
# Use site- and orbital-decomposed DOS if no input DOS given
if dos == None:
dos = self.site_dos
# Determine shape of dos, and set shape of summed_dos to be 1 dimension
# less.
#print "Shape of list_ is: "+str(np.shape(list_))
#print "Shape of dos is: "+str(np.shape(dos))
if len(np.shape(dos)) == 3:
#print "dos is 3-d array"
a = len(dos[0,0])
summed_dos = np.zeros((len(list_),self.n_dos,a))
else:
#print "dos is 2-d array"
summed_dos = np.zeros((len(list_),self.n_dos))
#print "Shape of summed_dos is: "+str(np.shape(summed_dos))
# Assume list_ is a 1-d array
#print "Shape of summed_dos[0] is: "+str(np.shape(summed_dos[0]))
#print "Shape of dos[0] is: "+str(np.shape(dos[0]))
for i in range(len(list_)):
try:
len(list_[i])
# list_[i] is an array
for j in list_[i]:
summed_dos[i] += dos[j]
except TypeError:
# list_[i] is not an array
array_flag = False
summed_dos[i] = dos[list_[i]]
return summed_dos
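# Hedged example (editor's addition): for an assumed 4-atom cell,
# sum_site_dos([[0, 1], [2, 3]]) returns two group-summed DOS arrays of shape
# (n_dos, n_orbitals), while sum_site_dos([0, 1]) simply returns the unsummed
# site DOS of atoms 0 and 1.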
def sum_lms_dos(self):
"""
Sums l-, m-, and s-projected DOS for each site-projected DOS.
Returns an array containing the total DOS projected onto each atom.
Should work for collinear non-spin-polarized, collinear spin-polarized,
and non-collinear calculations.
"""
summed_dos = np.zeros((self.n_atoms,self.n_dos))
if self.noncoll == 0:
if self.ispin == 0:
# collinear, non-spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
for k in range(len(self.site_dos[0,0])):
summed_dos[i,j] += self.site_dos[i,j,k]
else:
# collinear, spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
for k in range(len(self.site_dos[0,0])):
summed_dos[i,j] += self.site_dos[i,j,k]
else:
# non-collinear
for i in range(self.n_atoms):
for j in range(self.n_dos):
for k in range(0,len(self.site_dos[0,0]),4):
summed_dos[i,j] += self.site_dos[i,j,k]
return summed_dos
def sum_lm_dos(self):
"""
Sums l- and m-projected DOS for each site-projected DOS, leaving the
spin-up and spin-down differences. For collinear non-spin-polarized
calculations, half the DOS is plotted spin-up and half is plotted
spin-down. For non-collinear calculations, the total magnetization
density along the x-, y-, and z-axes is returned, with no change in
sign (the signs of the returned magnetization density along x, y, and z
are all positive).
"""
if self.noncoll == 0:
summed_dos = np.zeros((self.n_atoms,self.n_dos,2))
if self.ispin == 0:
# collinear, non-spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
for k in range(len(self.site_dos[0,0])):
summed_dos[i,j,0] += self.site_dos[i,j,k]/2.
summed_dos[i,j,1] -= self.site_dos[i,j,k]/2.
else:
# collinear, spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
for k in range(len(self.site_dos[0,0])):
if k%2 == 0: # k is even, corresponding to spin-up DOS
summed_dos[i,j,0] += self.site_dos[i,j,k]
else: # k is odd, corresponding to spin-down DOS
summed_dos[i,j,1] -= self.site_dos[i,j,k]
else:
# non-collinear
x = range(len(self.site_dos[0,0]))
y = range(0,len(self.site_dos[0,0]),4)
set = [ i for i in x if i not in y ]
summed_dos = np.zeros((self.n_atoms,self.n_dos,3))
for i in range(self.n_atoms):
for j in range(self.n_dos):
for k in set:
summed_dos[i,j,k%4-1] += self.site_dos[i,j,k]
return summed_dos
def determine_l_list(self):
"""
Determines how many l-orbitals are in the site-projected DOS.
This method assumes that the # of l-orbitals is the same for each atom
in the DOS.
"""
if self.noncoll == 0:
if self.ispin == 0:
# collinear, non-spin-polarized
if len(self.site_dos[0,0]) == 1: # s-orbitals only
l_list = [1]
elif len(self.site_dos[0,0]) == 4: # s- and p-orbitals
l_list = [1,3]
elif len(self.site_dos[0,0]) == 9: # s-, p-, and d-orbitals
l_list = [1,3,5]
elif len(self.site_dos[0,0]) == 16: # s-, p-, d-, and f-orbitals
l_list = [1,3,5,7]
else:
print "Unexpected number of lm-orbitals found."
print "Program will now quit."
sys.exit(1)
else:
# collinear, spin-polarized
if len(self.site_dos[0,0]) == 2: # s-orbitals only
l_list = [2]
elif len(self.site_dos[0,0]) == 8: # s- and p-orbitals
l_list = [2,6]
elif len(self.site_dos[0,0]) == 18: # s-, p-, and d-orbitals
l_list = [2,6,10]
elif len(self.site_dos[0,0]) == 32: # s-, p-, d-, and f-orbitals
l_list = [2,6,10,14]
else:
print "Unexpected number of lms-orbitals found."
print "Program will now quit."
sys.exit(1)
else:
# non-collinear
if len(self.site_dos[0,0]) == 4: # s-orbitals only
l_list = [4]
elif len(self.site_dos[0,0]) == 16: # s- and p-orbitals
l_list = [4,12]
elif len(self.site_dos[0,0]) == 36: # s-, p-, and d-orbitals
l_list = [4,12,20]
elif len(self.site_dos[0,0]) == 64: # s-, p-, d-, and f-orbitals
l_list = [4,12,20,28]
else:
print "Unexpected number of lms-orbitals found."
print "Program will now quit."
sys.exit(1)
return l_list
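# Hedged example (editor's addition): for a collinear spin-polarized run with
# s-, p-, and d-orbitals, determine_l_list() returns [2, 6, 10] -- one spin-up
# and one spin-down column for each m value of the s, p, and d channels.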
def sum_ms_dos(self):
"""
Sums m- and s-projected DOS for each site-projected DOS.
Returns an array containing the atom-, l-projected DOS.
Works for non-collinear calculations.
"""
l_list = self.determine_l_list()
# Perform the summation
summed_dos = np.zeros((self.n_atoms,self.n_dos,len(l_list)))
if self.noncoll == 0:
if self.ispin == 0:
# collinear non-spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
n = 0
for l in range(len(l_list)):
for m in range(l_list[l]):
summed_dos[i,j,l] += self.site_dos[i,j,n]
n += 1
else:
# collinear spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
n = 0
for l in range(len(l_list)):
for m in range(l_list[l]):
summed_dos[i,j,l] += self.site_dos[i,j,n]
n += 1
else:
# non-collinear
for i in range(self.n_atoms):
for j in range(self.n_dos):
for l in range(len(l_list)):
for m in range(0,l_list[l],4):
summed_dos[i,j,l] += self.site_dos[i,j,4*l+m]
return summed_dos
def sum_m_dos(self):
"""
Sums the m-projected DOS for each site-projected DOS. For
non-spin-polarized calculations, the spin-up and spin-down contributions
to each l-orbit are each half the total density of that l-orbit. For non-collinear
calculations, the atom- and l-projected magnetization density along the
x-, y-, and z-axes is summed over each m quantum number.
"""
l_list = self.determine_l_list()
# Perform summation
if self.noncoll == 0:
summed_dos = np.zeros((self.n_atoms,self.n_dos,2*len(l_list)))
if self.ispin == 0:
# collinear non-spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
n = 0
for l in range(len(l_list)):
for m in range(l_list[l]):
summed_dos[i,j,2*l] += self.site_dos[i,j,n]/2.
summed_dos[i,j,2*l+1] -= self.site_dos[i,j,n]/2.
n += 1
else:
# collinear spin-polarized
for i in range(self.n_atoms):
for j in range(self.n_dos):
n = 0
for l in range(len(l_list)):
for m in range(l_list[l]):
summed_dos[i,j,2*l+n%2] += self.site_dos[i,j,n]*(-1.)**(n)
n += 1
else:
# non-collinear
summed_dos = np.zeros((self.n_atoms,self.n_dos,3*len(l_list)))
for i in range(self.n_atoms):
for j in range(self.n_dos):
for l in range(len(l_list)):
x = range(l_list[l])
y = range(0,l_list[l],4)
set = [ i for i in x if i not in y ]
for m in set:
summed_dos[i,j,3*l+m%3-1] += self.site_dos[i,j,4*l+m]
return summed_dos
def shift_energy(self,energy):
"""
Adds the constant energy to the energy scale.
"""
self.e_min += energy
self.e_max += energy
self.e_fermi += energy
self.energy += energy
def scale_dos(self,scale,dos=None):
"""
Returns dos scaled by the factor scale.
"""
if dos == None:
dos = self.tot_dos
return dos*scale
def get_band_gaps(self,dos=None,tol=1e-3):
"""
Returns a list of tuples containing the lower and upper bounds of each
gap in the supplied dos. If no dos supplied, the total dos is used.
"""
        if dos is None:
dos = self.tot_dos
else:
dos = np.array(dos)
gaps = []
if np.shape(dos)[0] == self.n_dos:
# First entry of dos is list of energy points
if len(np.shape(dos)) == 3:
for i in range(len(dos[0])):
gaps.append([])
for j in range(len(dos[0,i])):
                        gaps[i].append([])
# Look at first energy of DOS
if np.abs(dos[0,i,j]) < tol:
# DOS starts in a gap
flag = 1
#gaps[i][j].append([self.energy[0]])
                            gaps[i][j].append([])
else:
# DOS starts in a band
flag = 0
# Loop over all DOS points except first and last
for k in range(1,self.n_dos):
if (np.abs(dos[k,i,j]) > tol) and (flag == 1):
# Found upper bound of a gap
flag = 0
gaps[i][j][-1].append(self.energy[k])
elif (np.abs(dos[k,i,j]) < tol) and (flag == 0):
# Found lower bound of a gap
flag = 1
gaps[i][j].append([self.energy[k-1]])
elif len(np.shape(dos)) == 2:
for i in range(len(dos[0])):
gaps.append([])
if np.abs(dos[0,i]) < tol:
# DOS starts in a gap
flag = 1
#gaps[i].append([self.energy[0]])
gaps[i].append([])
else:
# DOS starts in a band
flag = 0
for k in range(1,self.n_dos):
if (np.abs(dos[k,i]) > tol) and (flag == 1):
# Found upper bound of a gap
flag = 0
gaps[i][-1].append(self.energy[k])
elif (np.abs(dos[k,i]) < tol) and (flag == 0):
# Found lower bound of a gap
flag = 1
gaps[i].append([self.energy[k-1]])
else:
print "Shape of dos is unexpected."
print "Program will now quit!"
sys.exit(1)
elif np.shape(dos)[1] == self.n_dos:
# First entry of dos is list of sites
# Second entry of dos is list of energy points
if len(np.shape(dos)) == 3:
for i in range(len(dos)):
gaps.append([])
for j in range(len(dos[i,0])):
                        gaps[i].append([])
if np.abs(dos[i,0,j]) < tol:
# DOS starts in a gap
flag = 1
#gaps[i][j].append([self.energy[0]])
                            gaps[i][j].append([])
else:
# DOS starts in a band
flag = 0
for k in range(1,self.n_dos):
if (np.abs(dos[i,k,j]) > tol) and (flag == 1):
# Found upper bound of a gap
flag = 0
gaps[i][j][-1].append(self.energy[k])
elif (np.abs(dos[i,k,j]) < tol) and (flag == 0):
# Found lower bound of a gap
flag = 1
gaps[i][j].append([self.energy[k-1]])
elif len(np.shape(dos)) == 2:
for i in range(len(dos)):
gaps.append([])
if np.abs(dos[i,0]) < tol:
# DOS starts in a gap
flag = 1
#gaps[i].append([self.energy[0]])
gaps[i].append([])
else:
# DOS starts in a band
flag = 0
for k in range(1,self.n_dos):
if (np.abs(dos[i,k]) > tol) and (flag == 1):
# Found upper bound of a gap
flag = 0
gaps[i][-1].append(self.energy[k])
elif (np.abs(dos[i,k]) < tol) and (flag == 0):
# Found lower bound of a gap
flag = 1
gaps[i].append([self.energy[k-1]])
else:
print "Shape of dos is unexpected."
print "Program will now quit!"
sys.exit(1)
else:
# dos has a strange/incorrect formatting!
print "Unexpected formatting for dos file."
print "Program will now quit!"
sys.exit(1)
return np.array(gaps)
def write_dos(self,dos=None):
"""
Returns a string containing an electronic DOS or DOS's formatted for
plotting.
"""
        if dos is None:
dos = self.site_dos
else:
dos = np.array(dos)
output = ""
if np.shape(dos)[0] == self.n_dos:
# First entry of dos is list of energy points
for i in range(self.n_dos):
output += str(self.energy[i])
for j in range(len(dos[i])):
output += " "+str(dos[i,j])
output += "\n"
elif np.shape(dos)[1] == self.n_dos:
# First entry of dos is list of sites
if len(np.shape(dos)) == 3:
for i in range(self.n_dos):
output += str(self.energy[i])
for j in range(len(dos)):
for k in range(len(dos[j,i])):
output += " "+str(dos[j,i,k])
output += "\n"
elif len(np.shape(dos)) == 2:
for i in range(self.n_dos):
output += str(self.energy[i])
for j in range(len(dos)):
output += " "+str(dos[j,i])
output += "\n"
else:
# dos has a strange/incorrect formatting!
print "Unexpected formatting for dos file."
print "Program will now quit!"
sys.exit(1)
return output
def write_doses(self,dos_list):
"""
Returns a string containing a set of electronic DOS's formatted for
plotting.
"""
output = ""
for e in range(self.n_dos):
output += str(self.energy[e])
for i in range(len(dos_list)):
if np.shape(dos_list[i])[0] == self.n_dos:
# First energy of dos_list[i] is list of energy points
for j in range(len(dos_list[i][e])):
output += " "+str(dos_list[i][e,j])
elif np.shape(dos_list[i])[1] == self.n_dos:
# First entry of dos_list[i] is list of sites
if len(np.shape(dos_list[i])) == 3:
for j in range(len(dos_list[i])):
for k in range(len(dos_list[i][j,e])):
output += " "+str(dos_list[i][j,e,k])
elif len(np.shape(dos_list[i])) == 2:
for j in range(len(dos_list[i])):
output += " "+str(dos_list[i][j,e])
output += "\n"
return output
def generate_z_list(self,tol=1e-8):
"""
Returns a 2-d list of each plane of atoms, where the first index of list
contains each plane of atoms along the z-axis, and the second index
contains a list of atoms in each of the planes.
"""
z_positions = self.unit_cell.atom_positions[:,2]
z_sorted = np.argsort(z_positions)
result = []
result.append([z_sorted[0]])
for i in range(1,len(z_sorted)):
if abs(z_positions[z_sorted[i]] - z_positions[z_sorted[i-1]]) < tol:
result[-1].append(z_sorted[i])
else:
result.append([z_sorted[i]])
return np.array(result)
def make_z_dos(self):
"""
Sums site-projected DOS over lists of atoms lying in the same plane
along the z-axis of the unit cell.
"""
summed_dos = self.sum_lms_dos()
z_lists = self.generate_z_list()
z_dos = []
for i in range(len(z_lists)):
z_dos.append(self.sum_site_dos(z_lists[i],summed_dos))
return np.array(z_dos)
def make_half_dos(self):
"""
Sums site-projected DOS over two halves of the unit cell divided in two
along the z-direction.
"""
summed_dos = self.sum_lms_dos()
z_lists = self.generate_z_list()
half = len(z_lists)/2
half_dos = []
half_dos.append(self.sum_site_dos(z_lists[0:half].flatten(),summed_dos))
half_dos.append(self.sum_site_dos(z_lists[half:].flatten(),summed_dos))
return np.array(half_dos)
def dos_spline(self,dos=None):
"""
Fits a DOS to a cubic spline. Useful for adding/subtracting DOS curves
that have not been sampled on the same set of energies.
"""
from scipy.interpolate import InterpolatedUnivariateSpline
        if dos is None:
dos = self.tot_dos[:,0]
splinedos = InterpolatedUnivariateSpline(self.energy,dos)
return splinedos
def dos_difference(self,dos1,dos2):
"""
Subtracts two DOS curves. Assumes that dos1 and dos2 are cubic-splines.
"""
difference = dos1(self.energy) - dos2(self.energy)
return difference
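    # Illustrative usage sketch, not part of the original script: dos_spline()
    # and dos_difference() are meant to be combined -- each sampled DOS column
    # is turned into a cubic spline and the splines are subtracted on this
    # object's energy grid. The method name and the choice of the first two
    # site-projected columns are arbitrary; this assumes at least two atoms.
    def example_site_difference(self):
        spline_a = self.dos_spline(self.site_dos[0, :, 0])
        spline_b = self.dos_spline(self.site_dos[1, :, 0])
        return self.dos_difference(spline_a, spline_b)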
if __name__ == "__main__":
dos = ElectronicDOS()
dos.shift_energy(-1.*dos.e_fermi)
print np.shape(dos.tot_dos)
#print dos for each half of the interface
#sorted = dos.make_half_dos()
#sorted = dos.scale_dos(1./dos.n_atoms,sorted)
#dos_text = dos.write_dos(sorted)
sorted = dos.make_z_dos()
gaps = dos.get_band_gaps(dos=sorted,tol=1e-5)
print np.shape(gaps)
for i in range(len(gaps)):
for j in range(len(gaps[i])):
for k in range(len(gaps[i][j])):
print i,gaps[i][j][k]
print str(i+1),gaps[i][j][k]
print
| mit | -1,919,375,270,554,150,100 | 42.158754 | 90 | 0.47475 | false |
nocko/cfmi | tests/test_permissions.py | 1 | 1617 | from tests import TestCase, dbfixture
from fixture import DataTestCase
from tests.fixtures import (UserData, ProjectData, SubjectData, SessionData)
from cfmi.database.newsite import User, Project, Subject, Session
from sqlalchemy import and_
class PermissionsTestCase(TestCase):
@dbfixture.with_data(UserData, ProjectData)
def test_pi_auth_for_project(self, data):
for project in Project.query:
assert project.auth(project.pi)
@dbfixture.with_data(UserData, ProjectData)
def test_ra_auth_for_project(self, data):
for project in Project.query:
for user in project.users:
if not user.disabled:
assert project.auth(user)
@dbfixture.with_data(UserData, ProjectData)
def test_non_pi_non_user_reject(self, data):
user = User.query.filter(User.username=='pi2').one()
projs = Project.query.filter(and_(Project.pi!=user,
~Project.users.contains(user)))
for project in projs:
assert not project.auth(user)
@dbfixture.with_data(UserData, ProjectData)
def test_admin_auth(self, data):
admin = User.query.filter(User.permission_level==3).one()
assert admin.is_superuser()
projs = Project.query
for project in projs:
assert project.auth(admin)
@dbfixture.with_data(UserData, ProjectData)
def test_disabled_no_access(self, data):
disabled = User.query.filter(User.permission_level==0).first()
for project in Project.query:
assert not project.auth(disabled)
| bsd-3-clause | -8,015,727,402,655,858,000 | 39.425 | 76 | 0.654917 | false |
itabulous/mysql-connector-python | lib/mysql/connector/connection_cext.py | 14 | 20740 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Connection class using the C Extension
"""
# Detection of abstract methods in pylint is not working correctly
#pylint: disable=W0223
from . import errors
from .catch23 import INT_TYPES
from .constants import (
CharacterSet, FieldFlag, ServerFlag, ShutdownType, ClientFlag
)
from .abstracts import MySQLConnectionAbstract, MySQLCursorAbstract
from .protocol import MySQLProtocol
HAVE_CMYSQL = False
try:
import _mysql_connector # pylint: disable=F0401
from .cursor_cext import (
CMySQLCursor, CMySQLCursorRaw,
CMySQLCursorBuffered, CMySQLCursorBufferedRaw, CMySQLCursorPrepared,
CMySQLCursorDict, CMySQLCursorBufferedDict, CMySQLCursorNamedTuple,
CMySQLCursorBufferedNamedTuple)
from _mysql_connector import MySQLInterfaceError # pylint: disable=F0401
except ImportError as exc:
raise ImportError(
"MySQL Connector/Python C Extension not available ({0})".format(
str(exc)
))
else:
HAVE_CMYSQL = True
class CMySQLConnection(MySQLConnectionAbstract):
"""Class initiating a MySQL Connection using Connector/C"""
def __init__(self, **kwargs):
"""Initialization"""
if not HAVE_CMYSQL:
raise RuntimeError(
"MySQL Connector/Python C Extension not available")
self._cmysql = None
self._connection_timeout = 2
self._columns = []
self.converter = None
super(CMySQLConnection, self).__init__(**kwargs)
if len(kwargs) > 0:
self.connect(**kwargs)
def _do_handshake(self):
"""Gather information of the MySQL server before authentication"""
self._handshake = {
'protocol': self._cmysql.get_proto_info(),
'server_version_original': self._cmysql.get_server_info(),
'server_threadid': self._cmysql.thread_id(),
'charset': None,
'server_status': None,
'auth_plugin': None,
'auth_data': None,
'capabilities': self._cmysql.st_server_capabilities(),
}
self._server_version = self._check_server_version(
self._handshake['server_version_original']
)
@property
def _server_status(self):
"""Returns the server status attribute of MYSQL structure"""
return self._cmysql.st_server_status()
def set_unicode(self, value=True):
"""Toggle unicode mode
Set whether we return string fields as unicode or not.
Default is True.
"""
self._use_unicode = value
if self._cmysql:
self._cmysql.use_unicode(value)
if self.converter:
self.converter.set_unicode(value)
@property
def autocommit(self):
"""Get whether autocommit is on or off"""
value = self.info_query("SELECT @@session.autocommit")[0]
return True if value == 1 else False
@autocommit.setter
def autocommit(self, value): # pylint: disable=W0221
"""Toggle autocommit"""
try:
self._cmysql.autocommit(value)
self._autocommit = value
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
@property
def database(self):
"""Get the current database"""
return self.info_query("SELECT DATABASE()")[0]
@database.setter
def database(self, value): # pylint: disable=W0221
"""Set the current database"""
self._cmysql.select_db(value)
@property
def in_transaction(self):
"""MySQL session has started a transaction"""
return self._server_status & ServerFlag.STATUS_IN_TRANS
def _open_connection(self):
charset_name = CharacterSet.get_info(self._charset_id)[0]
self._cmysql = _mysql_connector.MySQL(
buffered=self._buffered,
raw=self._raw,
charset_name=charset_name,
connection_timeout=int(self._connection_timeout or 10),
use_unicode=self._use_unicode,
auth_plugin=self._auth_plugin)
cnx_kwargs = {
'host': self._host,
'user': self._user,
'password': self._password,
'database': self._database,
'port': self._port,
'client_flags': self._client_flags,
'unix_socket': self._unix_socket,
'compress': self.isset_client_flag(ClientFlag.COMPRESS)
}
if self.isset_client_flag(ClientFlag.SSL):
cnx_kwargs.update({
'ssl_ca': self._ssl['ca'],
'ssl_cert': self._ssl['cert'],
'ssl_key': self._ssl['key'],
'ssl_verify_cert': self._ssl['verify_cert']
})
try:
self._cmysql.connect(**cnx_kwargs)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._do_handshake()
def close(self):
"""Disconnect from the MySQL server"""
if self._cmysql:
try:
self._cmysql.close()
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._cmysql = None
disconnect = close
def is_connected(self):
"""Reports whether the connection to MySQL Server is available"""
if self._cmysql:
return self._cmysql.ping()
return False
def ping(self, reconnect=False, attempts=1, delay=0):
"""Check availability of the MySQL server
When reconnect is set to True, one or more attempts are made to try
to reconnect to the MySQL server using the reconnect()-method.
delay is the number of seconds to wait between each retry.
When the connection is not available, an InterfaceError is raised. Use
the is_connected()-method if you just want to check the connection
without raising an error.
Raises InterfaceError on errors.
"""
errmsg = "Connection to MySQL is not available"
try:
connected = self._cmysql.ping()
except AttributeError:
pass # Raise or reconnect later
else:
if connected:
return
if reconnect:
self.reconnect(attempts=attempts, delay=delay)
else:
raise errors.InterfaceError(errmsg)
def set_character_set_name(self, charset):
"""Sets the default character set name for current connection.
"""
self._cmysql.set_character_set(charset)
def info_query(self, query):
"""Send a query which only returns 1 row"""
self._cmysql.query(query)
first_row = ()
if self._cmysql.have_result_set:
first_row = self._cmysql.fetch_row()
if self._cmysql.fetch_row():
self._cmysql.free_result()
raise errors.InterfaceError(
"Query should not return more than 1 row")
self._cmysql.free_result()
return first_row
@property
def connection_id(self):
"""MySQL connection ID"""
try:
return self._cmysql.thread_id()
except MySQLInterfaceError:
pass # Just return None
return None
def get_rows(self, count=None, binary=False, columns=None):
"""Get all or a subset of rows returned by the MySQL server"""
if not (self._cmysql and self.unread_result):
raise errors.InternalError("No result set available")
rows = []
if count is not None and count <= 0:
raise AttributeError("count should be 1 or higher, or None")
counter = 0
try:
row = self._cmysql.fetch_row()
while row:
if self.converter:
row = list(row)
for i, _ in enumerate(row):
row[i] = self.converter.to_python(self._columns[i],
row[i])
row = tuple(row)
rows.append(row)
counter += 1
if count and counter == count:
break
row = self._cmysql.fetch_row()
except MySQLInterfaceError as exc:
self.free_result()
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
return rows
def get_row(self, binary=False, columns=None):
"""Get the next rows returned by the MySQL server"""
try:
return self.get_rows(count=1, binary=binary, columns=columns)[0]
except IndexError:
# No row available
return None
def next_result(self):
"""Reads the next result"""
if self._cmysql:
self._cmysql.consume_result()
return self._cmysql.next_result()
return None
def free_result(self):
"""Frees the result"""
if self._cmysql:
self._cmysql.free_result()
def commit(self):
"""Commit current transaction"""
if self._cmysql:
self._cmysql.commit()
def rollback(self):
"""Rollback current transaction"""
if self._cmysql:
self._cmysql.consume_result()
self._cmysql.rollback()
def cmd_init_db(self, database):
"""Change the current database"""
try:
self._cmysql.select_db(database)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
def fetch_eof_columns(self):
"""Fetch EOF and column information"""
if not self._cmysql.have_result_set:
raise errors.InterfaceError("No result set")
fields = self._cmysql.fetch_fields()
self._columns = []
for col in fields:
self._columns.append((
col[4],
int(col[8]),
None,
None,
None,
None,
~int(col[9]) & FieldFlag.NOT_NULL,
int(col[9])
))
return {
'eof': {
'status_flag': self._server_status,
'warning_count': self._cmysql.st_warning_count(),
},
'columns': self._columns,
}
def fetch_eof_status(self):
"""Fetch EOF and status information"""
if self._cmysql:
return {
'warning_count': self._cmysql.st_warning_count(),
'field_count': self._cmysql.st_field_count(),
'insert_id': self._cmysql.insert_id(),
'affected_rows': self._cmysql.affected_rows(),
'server_status': self._server_status,
}
return None
def cmd_query(self, query, raw=False, buffered=False, raw_as_string=False):
"""Send a query to the MySQL server"""
self.handle_unread_result()
try:
if not isinstance(query, bytes):
query = query.encode('utf-8')
self._cmysql.query(query,
raw=raw, buffered=buffered,
raw_as_string=raw_as_string)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(exc.errno, msg=exc.msg,
sqlstate=exc.sqlstate)
except AttributeError:
if self._unix_socket:
addr = self._unix_socket
else:
addr = self._host + ':' + str(self._port)
raise errors.OperationalError(
errno=2055, values=(addr, 'Connection not available.'))
self._columns = []
if not self._cmysql.have_result_set:
# No result
return self.fetch_eof_status()
return self.fetch_eof_columns()
_execute_query = cmd_query
def cursor(self, buffered=None, raw=None, prepared=None, cursor_class=None,
dictionary=None, named_tuple=None):
"""Instantiates and returns a cursor using C Extension
By default, CMySQLCursor is returned. Depending on the options
while connecting, a buffered and/or raw cursor is instantiated
instead. Also depending upon the cursor options, rows can be
returned as dictionary or named tuple.
Dictionary and namedtuple based cursors are available with buffered
output but not raw.
It is possible to also give a custom cursor through the
cursor_class parameter, but it needs to be a subclass of
mysql.connector.cursor_cext.CMySQLCursor.
Raises ProgrammingError when cursor_class is not a subclass of
CursorBase. Raises ValueError when cursor is not available.
Returns instance of CMySQLCursor or subclass.
:param buffered: Return a buffering cursor
:param raw: Return a raw cursor
:param prepared: Return a cursor which uses prepared statements
:param cursor_class: Use a custom cursor class
:param dictionary: Rows are returned as dictionary
:param named_tuple: Rows are returned as named tuple
:return: Subclass of CMySQLCursor
:rtype: CMySQLCursor or subclass
"""
self.handle_unread_result()
if not self.is_connected():
raise errors.OperationalError("MySQL Connection not available.")
if cursor_class is not None:
if not issubclass(cursor_class, MySQLCursorAbstract):
raise errors.ProgrammingError(
"Cursor class needs be to subclass"
" of cursor_cext.CMySQLCursor")
return (cursor_class)(self)
buffered = buffered or self._buffered
raw = raw or self._raw
cursor_type = 0
if buffered is True:
cursor_type |= 1
if raw is True:
cursor_type |= 2
if dictionary is True:
cursor_type |= 4
if named_tuple is True:
cursor_type |= 8
if prepared is True:
cursor_type |= 16
types = {
0: CMySQLCursor, # 0
1: CMySQLCursorBuffered,
2: CMySQLCursorRaw,
3: CMySQLCursorBufferedRaw,
4: CMySQLCursorDict,
5: CMySQLCursorBufferedDict,
8: CMySQLCursorNamedTuple,
9: CMySQLCursorBufferedNamedTuple,
16: CMySQLCursorPrepared
}
try:
return (types[cursor_type])(self)
except KeyError:
args = ('buffered', 'raw', 'dictionary', 'named_tuple', 'prepared')
raise ValueError('Cursor not available with given criteria: ' +
', '.join([args[i] for i in range(5)
if cursor_type & (1 << i) != 0]))
@property
def num_rows(self):
"""Returns number of rows of current result set"""
if not self._cmysql.have_result_set:
raise errors.InterfaceError("No result set")
return self._cmysql.num_rows()
@property
def warning_count(self):
"""Returns number of warnings"""
if not self._cmysql:
return 0
return self._cmysql.warning_count()
@property
def result_set_available(self):
"""Check if a result set is available"""
if not self._cmysql:
return False
return self._cmysql.have_result_set
@property
def unread_result(self):
"""Check if there are unread results or rows"""
return self.result_set_available
@property
def more_results(self):
"""Check if there are more results"""
return self._cmysql.more_results()
def prepare_for_mysql(self, params):
"""Prepare parameters for statements
        This method is used by cursors to prepare parameters found in the
list (or tuple) params.
Returns dict.
"""
if isinstance(params, (list, tuple)):
result = self._cmysql.convert_to_mysql(*params)
elif isinstance(params, dict):
result = {}
for key, value in params.items():
result[key] = self._cmysql.convert_to_mysql(value)[0]
else:
raise ValueError("Could not process parameters")
return result
def consume_results(self):
"""Consume the current result
        This method consumes the result by reading (consuming) all rows.
"""
self._cmysql.consume_result()
def cmd_change_user(self, username='', password='', database='',
charset=33):
"""Change the current logged in user"""
try:
self._cmysql.change_user(username, password, database)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._charset_id = charset
self._post_connection()
def cmd_refresh(self, options):
"""Send the Refresh command to the MySQL server"""
try:
self._cmysql.refresh(options)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
return self.fetch_eof_status()
def cmd_quit(self):
"""Close the current connection with the server"""
self.close()
def cmd_shutdown(self, shutdown_type=None):
"""Shut down the MySQL Server"""
if not self._cmysql:
raise errors.OperationalError("MySQL Connection not available")
if shutdown_type:
if not ShutdownType.get_info(shutdown_type):
raise errors.InterfaceError("Invalid shutdown type")
level = shutdown_type
else:
level = ShutdownType.SHUTDOWN_DEFAULT
try:
self._cmysql.shutdown(level)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self.close()
def cmd_statistics(self):
"""Return statistics from the MySQL server"""
self.handle_unread_result()
try:
stat = self._cmysql.stat()
return MySQLProtocol().parse_statistics(stat, with_header=False)
except (MySQLInterfaceError, errors.InterfaceError) as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
def cmd_process_kill(self, mysql_pid):
"""Kill a MySQL process"""
if not isinstance(mysql_pid, INT_TYPES):
raise ValueError("MySQL PID must be int")
self.info_query("KILL {0}".format(mysql_pid))
def handle_unread_result(self):
"""Check whether there is an unread result"""
if self.can_consume_results:
self.consume_results()
elif self.unread_result:
raise errors.InternalError("Unread result found")
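# Illustrative usage sketch, not part of the original module. The connection
# parameters below are placeholders and the cursor calls assume the standard
# Connector/Python DB-API style interface implemented by the cursor classes
# imported above.
def _example_usage():
    cnx = CMySQLConnection(user='scott', password='secret',
                           host='127.0.0.1', database='test')
    cur = cnx.cursor(dictionary=True)
    cur.execute("SELECT VERSION() AS version")
    row = cur.fetchone()
    cnx.close()
    return row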
| gpl-2.0 | -3,262,062,547,057,617,000 | 33.974705 | 79 | 0.577242 | false |
voutilad/courtlistener | cl/lib/sunburnt/dates.py | 5 | 3043 | from __future__ import absolute_import
import datetime, math, re, warnings
try:
import mx.DateTime
except ImportError:
warnings.warn(
"mx.DateTime not found, retricted to Python datetime objects",
ImportWarning)
mx = None
# mlr: Overrides the import above so that Django templates won't crash.
# this is the simple fix to the issue here: https://github.com/tow/sunburnt/issues/26
mx = None
year = r'[+/-]?\d+'
tzd = r'Z|((?P<tzd_sign>[-+])(?P<tzd_hour>\d\d):(?P<tzd_minute>\d\d))'
extended_iso_template = r'(?P<year>' + year + r""")
(-(?P<month>\d\d)
(-(?P<day>\d\d)
([T%s](?P<hour>\d\d)
:(?P<minute>\d\d)
(:(?P<second>\d\d)
(.(?P<fraction>\d+))?)?
(""" + tzd + """)?)?
)?)?"""
extended_iso = extended_iso_template % " "
extended_iso_re = re.compile('^' + extended_iso + '$', re.X)
def datetime_from_w3_datestring(s):
""" We need to extend ISO syntax (as permitted by the standard) to allow
for dates before 0AD and after 9999AD. This is how to parse such a string"""
m = extended_iso_re.match(s)
if not m:
raise ValueError
d = m.groupdict()
d['year'] = int(d['year'])
d['month'] = int(d['month'] or 1)
d['day'] = int(d['day'] or 1)
d['hour'] = int(d['hour'] or 0)
d['minute'] = int(d['minute'] or 0)
d['fraction'] = d['fraction'] or '0'
d['second'] = float("%s.%s" % ((d['second'] or '0'), d['fraction']))
del d['fraction']
if d['tzd_sign']:
if d['tzd_sign'] == '+':
tzd_sign = 1
elif d['tzd_sign'] == '-':
tzd_sign = -1
try:
tz_delta = datetime_delta_factory(tzd_sign * int(d['tzd_hour']),
tzd_sign * int(d['tzd_minute']))
        except DateTimeRangeError as e:
raise ValueError(e.args[0])
else:
tz_delta = datetime_delta_factory(0, 0)
del d['tzd_sign']
del d['tzd_hour']
del d['tzd_minute']
try:
dt = datetime_factory(**d) + tz_delta
    except DateTimeRangeError as e:
raise ValueError(e.args[0])
return dt
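# Illustrative example, not part of the original module: parsing an extended
# ISO-8601 string with fractional seconds and a UTC 'Z' designator. The date
# string below is an arbitrary placeholder.
def _example_parse():
    return datetime_from_w3_datestring("2009-07-23T03:54:00.078Z")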
class DateTimeRangeError(ValueError):
pass
if mx:
def datetime_factory(**kwargs):
try:
return mx.DateTime.DateTimeFrom(**kwargs)
        except mx.DateTime.RangeError as e:
raise DateTimeRangeError(e.args[0])
else:
def datetime_factory(**kwargs):
second = kwargs.get('second')
if second is not None:
f, i = math.modf(second)
kwargs['second'] = int(i)
kwargs['microsecond'] = int(f * 1000000)
try:
return datetime.datetime(**kwargs)
except ValueError, e:
raise DateTimeRangeError(e.args[0])
if mx:
def datetime_delta_factory(hours, minutes):
return mx.DateTime.DateTimeDelta(0, hours, minutes)
else:
def datetime_delta_factory(hours, minutes):
return datetime.timedelta(hours=hours, minutes=minutes)
| agpl-3.0 | -8,970,570,287,296,813,000 | 30.697917 | 85 | 0.548472 | false |
cbclab/MDT | mdt/data/components/standard/compartment_models/GDRCylinders.py | 1 | 2785 | from mdt import CompartmentTemplate, FreeParameterTemplate
from mdt.model_building.parameter_functions.transformations import ScaleTransform
__author__ = 'Robbert Harms'
__date__ = '2018-09-15'
__maintainer__ = 'Robbert Harms'
__email__ = '[email protected]'
__licence__ = 'LGPL v3'
class GDRCylinders(CompartmentTemplate):
"""Gamma Distributed Radii cylinders, for use in AxCaliber modelling."""
parameters = ('g', 'b', 'G', 'Delta', 'delta', 'd', 'theta', 'phi', 'shape', 'scale', '@cache')
dependencies = ('VanGelderenCylinder', 'SphericalToCartesian', 'gamma_ppf', 'gamma_pdf')
cl_code = '''
uint nmr_radii = 16;
double radius_spacing = (*cache->upper_radius - *cache->lower_radius) / nmr_radii;
double direction_2 = pown(dot(g, SphericalToCartesian(theta, phi)), 2);
double diffusivity_par = -b * d * direction_2;
double radius;
double diffusivity_perp;
double weight_sum = 0;
double signal_sum = 0;
for(uint i = 0; i < nmr_radii; i++){
radius = *cache->lower_radius + (i + 0.5) * radius_spacing;
diffusivity_perp = (1 - direction_2) * VanGelderenCylinder(G, Delta, delta, d, radius);
signal_sum += cache->weights[i] * exp(diffusivity_par + diffusivity_perp);
weight_sum += cache->weights[i];
}
return signal_sum / weight_sum;
'''
cache_info = {
'fields': ['double lower_radius',
'double upper_radius',
('double', 'weights', 16)],
'cl_code': '''
*cache->lower_radius = gamma_ppf(0.01, shape, scale);
*cache->upper_radius = gamma_ppf(0.99, shape, scale);
const uint nmr_radii = 16;
double radius_spacing = (*cache->upper_radius - *cache->lower_radius) / nmr_radii;
double radius;
for(uint i = 0; i < nmr_radii; i++){
radius = *cache->lower_radius + (i + 0.5) * radius_spacing;
// area without * M_PI since it is a constant
cache->weights[i] = gamma_pdf(radius, shape, scale) * (radius * radius);
}
'''
}
extra_optimization_maps = [lambda d: {'R': d['shape'] * d['scale'],
'R_variance': d['shape'] * d['scale'] * d['scale']}]
class shape(FreeParameterTemplate):
init_value = 2
lower_bound = 1e-3
upper_bound = 25
sampling_proposal_std = 0.01
class scale(FreeParameterTemplate):
init_value = 1e-6
lower_bound = 0.01e-6
upper_bound = 20e-6
parameter_transform = ScaleTransform(1e6)
sampling_proposal_std = 0.01e-6
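# Illustrative sketch, not part of the template: the radius weighting done by
# the OpenCL cache code above, rewritten with numpy/scipy. Sixteen radii are
# placed between the 1% and 99% quantiles of Gamma(shape, scale) and weighted
# by the gamma PDF times the cylinder cross-section (~radius**2); normalising
# the weights here corresponds to dividing by weight_sum in the CL kernel.
def _example_radius_weights(shape=2.0, scale=1e-6, nmr_radii=16):
    import numpy as np
    from scipy.stats import gamma
    lower = gamma.ppf(0.01, shape, scale=scale)
    upper = gamma.ppf(0.99, shape, scale=scale)
    radii = lower + (np.arange(nmr_radii) + 0.5) * (upper - lower) / nmr_radii
    weights = gamma.pdf(radii, shape, scale=scale) * radii ** 2
    return radii, weights / weights.sum()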
| lgpl-3.0 | -5,009,310,789,852,433,000 | 38.785714 | 99 | 0.554399 | false |
Fleurer/flask-oauthlib | example/twitter.py | 14 | 2514 | # coding: utf-8
from flask import Flask
from flask import g, session, request, url_for, flash
from flask import redirect, render_template
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
twitter = oauth.remote_app(
'twitter',
consumer_key='xBeXxg9lyElUgwZT6AZ0A',
consumer_secret='aawnSpNTOVuDCjx7HMh6uSXetjNN8zWLpZwCEU4LBrk',
base_url='https://api.twitter.com/1.1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authenticate',
)
@twitter.tokengetter
def get_twitter_token():
if 'twitter_oauth' in session:
resp = session['twitter_oauth']
return resp['oauth_token'], resp['oauth_token_secret']
@app.before_request
def before_request():
g.user = None
if 'twitter_oauth' in session:
g.user = session['twitter_oauth']
@app.route('/')
def index():
tweets = None
if g.user is not None:
resp = twitter.request('statuses/home_timeline.json')
if resp.status == 200:
tweets = resp.data
else:
flash('Unable to load tweets from Twitter.')
return render_template('index.html', tweets=tweets)
@app.route('/tweet', methods=['POST'])
def tweet():
if g.user is None:
return redirect(url_for('login', next=request.url))
status = request.form['tweet']
if not status:
return redirect(url_for('index'))
resp = twitter.post('statuses/update.json', data={
'status': status
})
if resp.status == 403:
flash('Your tweet was too long.')
elif resp.status == 401:
flash('Authorization error with Twitter.')
else:
flash('Successfully tweeted your tweet (ID: #%s)' % resp.data['id'])
return redirect(url_for('index'))
@app.route('/login')
def login():
callback_url = url_for('oauthorized', next=request.args.get('next'))
return twitter.authorize(callback=callback_url or request.referrer or None)
@app.route('/logout')
def logout():
session.pop('twitter_oauth', None)
return redirect(url_for('index'))
@app.route('/oauthorized')
def oauthorized():
resp = twitter.authorized_response()
if resp is None:
flash('You denied the request to sign in.')
else:
session['twitter_oauth'] = resp
return redirect(url_for('index'))
if __name__ == '__main__':
app.run()
| bsd-3-clause | 6,920,193,633,168,002,000 | 25.744681 | 79 | 0.649165 | false |
johnctitus/troposphere | examples/ElastiCacheRedis.py | 2 | 21814 | #!/usr/bin/env python
"""
Converted from ElastiCache_Redis.template located at:
http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
In addition to troposphere, this script requires awacs (Amazon Web Access
Control Subsystem)
"""
from __future__ import absolute_import, division, print_function
import troposphere.ec2 as ec2
import troposphere.elasticache as elasticache
import troposphere.iam as iam
import awacs
from awacs.aws import (Allow,
Statement,
Principal,
Policy)
from awacs.sts import AssumeRole
from troposphere import (Base64,
cloudformation,
FindInMap,
GetAtt,
Join,
Parameter,
Output,
Ref,
Tags,
Template)
from troposphere.policies import (CreationPolicy,
ResourceSignal)
def main():
"""
    Create an ElastiCache Redis Node and EC2 Instance
"""
template = Template()
# Description
template.add_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis: '
        'Sample template showing how to create an Amazon '
        'ElastiCache Redis Cluster. **WARNING** This template '
        'creates an Amazon EC2 Instance and an Amazon ElastiCache '
        'Cluster. You will be billed for the AWS resources used '
        'if you create a stack from this template.')
# Mappings
template.add_mapping('AWSInstanceType2Arch', {
't1.micro': {'Arch': 'PV64'},
't2.micro': {'Arch': 'HVM64'},
't2.small': {'Arch': 'HVM64'},
't2.medium': {'Arch': 'HVM64'},
'm1.small': {'Arch': 'PV64'},
'm1.medium': {'Arch': 'PV64'},
'm1.large': {'Arch': 'PV64'},
'm1.xlarge': {'Arch': 'PV64'},
'm2.xlarge': {'Arch': 'PV64'},
'm2.2xlarge': {'Arch': 'PV64'},
'm2.4xlarge': {'Arch': 'PV64'},
'm3.medium': {'Arch': 'HVM64'},
'm3.large': {'Arch': 'HVM64'},
'm3.xlarge': {'Arch': 'HVM64'},
'm3.2xlarge': {'Arch': 'HVM64'},
'c1.medium': {'Arch': 'PV64'},
'c1.xlarge': {'Arch': 'PV64'},
'c3.large': {'Arch': 'HVM64'},
'c3.xlarge': {'Arch': 'HVM64'},
'c3.2xlarge': {'Arch': 'HVM64'},
'c3.4xlarge': {'Arch': 'HVM64'},
'c3.8xlarge': {'Arch': 'HVM64'},
'c4.large': {'Arch': 'HVM64'},
'c4.xlarge': {'Arch': 'HVM64'},
'c4.2xlarge': {'Arch': 'HVM64'},
'c4.4xlarge': {'Arch': 'HVM64'},
'c4.8xlarge': {'Arch': 'HVM64'},
'g2.2xlarge': {'Arch': 'HVMG2'},
'r3.large': {'Arch': 'HVM64'},
'r3.xlarge': {'Arch': 'HVM64'},
'r3.2xlarge': {'Arch': 'HVM64'},
'r3.4xlarge': {'Arch': 'HVM64'},
'r3.8xlarge': {'Arch': 'HVM64'},
'i2.xlarge': {'Arch': 'HVM64'},
'i2.2xlarge': {'Arch': 'HVM64'},
'i2.4xlarge': {'Arch': 'HVM64'},
'i2.8xlarge': {'Arch': 'HVM64'},
'd2.xlarge': {'Arch': 'HVM64'},
'd2.2xlarge': {'Arch': 'HVM64'},
'd2.4xlarge': {'Arch': 'HVM64'},
'd2.8xlarge': {'Arch': 'HVM64'},
'hi1.4xlarge': {'Arch': 'HVM64'},
'hs1.8xlarge': {'Arch': 'HVM64'},
'cr1.8xlarge': {'Arch': 'HVM64'},
'cc2.8xlarge': {'Arch': 'HVM64'}
})
template.add_mapping('AWSRegionArch2AMI', {
'us-east-1': {'PV64': 'ami-0f4cfd64',
'HVM64': 'ami-0d4cfd66',
'HVMG2': 'ami-5b05ba30'},
'us-west-2': {'PV64': 'ami-d3c5d1e3',
'HVM64': 'ami-d5c5d1e5',
'HVMG2': 'ami-a9d6c099'},
'us-west-1': {'PV64': 'ami-85ea13c1',
'HVM64': 'ami-87ea13c3',
'HVMG2': 'ami-37827a73'},
'eu-west-1': {'PV64': 'ami-d6d18ea1',
'HVM64': 'ami-e4d18e93',
'HVMG2': 'ami-72a9f105'},
'eu-central-1': {'PV64': 'ami-a4b0b7b9',
'HVM64': 'ami-a6b0b7bb',
'HVMG2': 'ami-a6c9cfbb'},
'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
'HVM64': 'ami-1c1b9f1c',
'HVMG2': 'ami-f644c4f6'},
'ap-southeast-1': {'PV64': 'ami-d24b4280',
'HVM64': 'ami-d44b4286',
'HVMG2': 'ami-12b5bc40'},
'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
'HVM64': 'ami-db7b39e1',
'HVMG2': 'ami-b3337e89'},
'sa-east-1': {'PV64': 'ami-5b098146',
'HVM64': 'ami-55098148',
'HVMG2': 'NOT_SUPPORTED'},
'cn-north-1': {'PV64': 'ami-bec45887',
'HVM64': 'ami-bcc45885',
'HVMG2': 'NOT_SUPPORTED'}
})
template.add_mapping('Region2Principal', {
'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
})
# Parameters
cachenodetype = template.add_parameter(Parameter(
'ClusterNodeType',
Description='The compute and memory capacity of the nodes in the Redis'
' Cluster',
Type='String',
Default='cache.m1.small',
AllowedValues=['cache.m1.small',
'cache.m1.large',
'cache.m1.xlarge',
'cache.m2.xlarge',
'cache.m2.2xlarge',
'cache.m2.4xlarge',
'cache.c1.xlarge'],
ConstraintDescription='must select a valid Cache Node type.',
))
instancetype = template.add_parameter(Parameter(
'InstanceType',
Description='WebServer EC2 instance type',
Type='String',
Default='t2.micro',
AllowedValues=['t1.micro',
't2.micro',
't2.small',
't2.medium',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'g2.2xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'hi1.4xlarge',
'hs1.8xlarge',
'cr1.8xlarge',
'cc2.8xlarge',
'cg1.4xlarge'],
ConstraintDescription='must be a valid EC2 instance type.',
))
keyname = template.add_parameter(Parameter(
'KeyName',
Description='Name of an existing EC2 KeyPair to enable SSH access'
' to the instance',
Type='AWS::EC2::KeyPair::KeyName',
ConstraintDescription='must be the name of an existing EC2 KeyPair.',
))
sshlocation = template.add_parameter(Parameter(
'SSHLocation',
Description='The IP address range that can be used to SSH to'
' the EC2 instances',
Type='String',
MinLength='9',
MaxLength='18',
Default='0.0.0.0/0',
AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
'(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
ConstraintDescription='must be a valid IP CIDR range of the'
' form x.x.x.x/x.'
))
# Resources
webserverrole = template.add_resource(iam.Role(
'WebServerRole',
AssumeRolePolicyDocument=Policy(
Statement=[
Statement(
Effect=Allow,
Action=[AssumeRole],
Principal=Principal('Service',
[FindInMap('Region2Principal',
Ref('AWS::Region'),
'EC2Principal')]),
)
]
),
Path='/',
))
template.add_resource(iam.PolicyType(
'WebServerRolePolicy',
PolicyName='WebServerRole',
PolicyDocument=awacs.aws.Policy(
Statement=[awacs.aws.Statement(
Action=[awacs.aws.Action("elasticache",
"DescribeCacheClusters")],
Resource=["*"],
Effect=awacs.aws.Allow
)]
),
Roles=[Ref(webserverrole)],
))
webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
'WebServerInstanceProfile',
Path='/',
Roles=[Ref(webserverrole)],
))
webserversg = template.add_resource(ec2.SecurityGroup(
'WebServerSecurityGroup',
GroupDescription='Enable HTTP and SSH access',
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='22',
ToPort='22',
CidrIp=Ref(sshlocation),
),
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='80',
ToPort='80',
CidrIp='0.0.0.0/0',
)
]
))
webserverinstance = template.add_resource(ec2.Instance(
'WebServerInstance',
Metadata=cloudformation.Metadata(
cloudformation.Init({
'config': cloudformation.InitConfig(
packages={
'yum': {
'httpd': [],
'php': [],
'php-devel': [],
'gcc': [],
'make': []
}
},
files=cloudformation.InitFiles({
'/var/www/html/index.php': cloudformation.InitFile(
content=Join('', [
'<?php\n',
'echo \"<h1>AWS CloudFormation sample'
' application for Amazon ElastiCache'
' Redis Cluster</h1>\";\n',
'\n',
'$cluster_config = json_decode('
'file_get_contents(\'/tmp/cacheclusterconfig\''
'), true);\n',
'$endpoint = $cluster_config[\'CacheClusters'
'\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
'ress\'];\n',
'$port = $cluster_config[\'CacheClusters\'][0]'
'[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
'\n',
'\n',
'echo \"<p>Connecting to Redis Cache Cluster '
'node \'{$endpoint}\' on port {$port}</p>\";'
'\n',
'\n',
'$redis=new Redis();\n',
'$redis->connect($endpoint, $port);\n',
'$redis->set(\'testkey\', \'Hello World!\');'
'\n',
'$return = $redis->get(\'testkey\');\n',
'\n',
'echo \"<p>Retrieved value: $return</p>\";'
'\n',
'?>\n'
]),
mode='000644',
owner='apache',
group='apache'
),
'/etc/cron.d/get_cluster_config':
cloudformation.InitFile(
content='*/5 * * * * root'
' /usr/local/bin/get_cluster_config',
mode='000644',
owner='root',
group='root'
),
'/usr/local/bin/get_cluster_config':
cloudformation.InitFile(
content=Join('', [
'#! /bin/bash\n',
'aws elasticache describe-cache-clusters ',
' --cache-cluster-id ',
Ref('RedisCluster'),
' --show-cache-node-info'
' --region ', Ref('AWS::Region'),
' > /tmp/cacheclusterconfig\n'
]),
mode='000755',
owner='root',
group='root'
),
'/usr/local/bin/install_phpredis':
cloudformation.InitFile(
content=Join('', [
'#! /bin/bash\n',
'cd /tmp\n',
'wget https://github.com/nicolasff/'
'phpredis/zipball/master -O phpredis.zip'
'\n',
'unzip phpredis.zip\n',
'cd nicolasff-phpredis-*\n',
'phpize\n',
'./configure\n',
'make && make install\n',
'touch /etc/php.d/redis.ini\n',
'echo extension=redis.so > /etc/php.d/'
'redis.ini\n'
]),
mode='000755',
owner='root',
group='root'
),
'/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
content=Join('', [
'[main]\n',
'stack=', Ref('AWS::StackId'), '\n',
'region=', Ref('AWS::Region'), '\n'
]),
mode='000400',
owner='root',
group='root'
),
'/etc/cfn/hooks.d/cfn-auto-reloader.conf':
cloudformation.InitFile(
content=Join('', [
'[cfn-auto-reloader-hook]\n',
'triggers=post.update\n',
'path=Resources.WebServerInstance.Metadata'
'.AWS::CloudFormation::Init\n',
'action=/opt/aws/bin/cfn-init -v ',
' --stack ', Ref('AWS::StackName'),
' --resource WebServerInstance ',
' --region ', Ref('AWS::Region'),
'\n',
'runas=root\n'
]),
# Why doesn't the Amazon template have this?
# mode='000400',
# owner='root',
# group='root'
),
}),
commands={
'01-install_phpredis': {
'command': '/usr/local/bin/install_phpredis'
},
'02-get-cluster-config': {
'command': '/usr/local/bin/get_cluster_config'
}
},
services={
"sysvinit": cloudformation.InitServices({
"httpd": cloudformation.InitService(
enabled=True,
ensureRunning=True,
),
"cfn-hup": cloudformation.InitService(
enabled=True,
ensureRunning=True,
files=['/etc/cfn/cfn-hup.conf',
'/etc/cfn/hooks.d/'
'cfn-auto-reloader.conf']
),
}),
},
)
})
),
ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
FindInMap('AWSInstanceType2Arch',
Ref(instancetype), 'Arch')),
InstanceType=Ref(instancetype),
SecurityGroups=[Ref(webserversg)],
KeyName=Ref(keyname),
IamInstanceProfile=Ref(webserverinstanceprofile),
UserData=Base64(Join('', [
'#!/bin/bash -xe\n',
'yum update -y aws-cfn-bootstrap\n',
'# Setup the PHP sample application\n',
'/opt/aws/bin/cfn-init -v ',
' --stack ', Ref('AWS::StackName'),
' --resource WebServerInstance ',
' --region ', Ref('AWS::Region'), '\n',
'# Signal the status of cfn-init\n',
'/opt/aws/bin/cfn-signal -e $? ',
' --stack ', Ref('AWS::StackName'),
' --resource WebServerInstance ',
' --region ', Ref('AWS::Region'), '\n'
])),
CreationPolicy=CreationPolicy(
ResourceSignal=ResourceSignal(Timeout='PT15M')
),
Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposphere')
))
redisclustersg = template.add_resource(elasticache.SecurityGroup(
'RedisClusterSecurityGroup',
Description='Lock the cluster down',
))
template.add_resource(elasticache.SecurityGroupIngress(
'RedisClusterSecurityGroupIngress',
CacheSecurityGroupName=Ref(redisclustersg),
EC2SecurityGroupName=Ref(webserversg),
))
template.add_resource(elasticache.CacheCluster(
'RedisCluster',
Engine='redis',
CacheNodeType=Ref(cachenodetype),
NumCacheNodes='1',
CacheSecurityGroupNames=[Ref(redisclustersg)],
))
# Outputs
template.add_output([
Output(
'WebsiteURL',
Description='Application URL',
Value=Join('', [
'http://',
GetAtt(webserverinstance, 'PublicDnsName'),
])
)
])
# Print CloudFormation Template
print(template.to_json())
if __name__ == '__main__':
main()
| bsd-2-clause | 3,090,292,442,574,632,000 | 40.392789 | 79 | 0.390987 | false |
HarmonyEnterpriseSolutions/harmony-platform | src/gnue/forms/uidrivers/java/widgets/list_.py | 1 | 1699 |
from gnue.forms.input.GFKeyMapper import KeyMapper
from src.gnue.forms.uidrivers.java.widgets._base import UIWidget
from src.gnue.forms.uidrivers.java.widgets._remote import List
_all__ = ["UIList"]
# =============================================================================
# Interface implementation for a grid widget
# =============================================================================
class UIList(UIWidget):
def _create_widget_ (self, event):
self.widget = List(self, self._gfObject.label or "", self._gfObject.style)
self.getParent().addWidget(self)
def is_growable(self):
return True
def _ui_set_values_(self, values):
self.widget.uiSetValues(values)
def _ui_set_value_(self, index, value):
self.widget.uiSetValue(index, value)
def _ui_select_row_(self, index):
self.widget.uiSelectRow(index)
def addWidget(self, ui_widget):
"""
Add a given UI widget to the Notebook.
@param ui_widget: widget to add to the page
"""
self.widget.uiAdd(ui_widget.widget)
def onSelectionChanged(self, index):
self._gfObject._event_item_focused(index)
def onSetFocus(self):
self._gfObject._event_set_focus()
# navigable
def _ui_set_focus_(self):
self.widget.uiSetFocus()
def onKeyPressed(self, keycode, shiftDown, ctrlDown, altDown):
command, args = KeyMapper.getEvent(keycode, shiftDown, ctrlDown, altDown)
if command:
self._request(command, triggerName=args)
# =============================================================================
# Configuration data
# =============================================================================
configuration = {
'baseClass': UIList,
'provides' : 'GFList',
'container': True
}
| gpl-2.0 | -2,706,123,211,487,668,700 | 25.546875 | 79 | 0.584461 | false |
MihaiMoldovanu/ansible | lib/ansible/modules/system/openwrt_init.py | 26 | 6453 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Andrew Gaffney <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: openwrt_init
author:
- "Andrew Gaffney (@agaffney)"
version_added: "2.3"
short_description: Manage services on OpenWrt.
description:
- Controls OpenWrt services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
pattern:
required: false
description:
- If the service does not respond to the 'running' command, name a
substring to look for as would be found in the output of the I(ps)
command as a stand-in for a 'running' result. If the string is found,
the service will be assumed to be running.
notes:
- One option other than name is required.
requirements:
- An OpenWrt system (with python)
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- openwrt_init:
state: started
name: httpd
# Example action to stop service cron, if running
- openwrt_init:
name: cron
state: stopped
# Example action to reload service httpd, in all cases
- openwrt_init:
name: httpd
state: reloaded
# Example action to enable service httpd
- openwrt_init:
name: httpd
enabled: yes
'''
RETURN = '''
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
module = None
init_script = None
# ===============================
# Check if service is enabled
def is_enabled():
(rc, out, err) = module.run_command("%s enabled" % init_script)
if rc == 0:
return True
return False
# ===========================================
# Main control flow
def main():
global module, init_script
# init
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['service']),
state = dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
pattern = dict(required=False, default=None),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
# initialize
service = module.params['name']
init_script = '/etc/init.d/' + service
rc = 0
out = err = ''
result = {
'name': service,
'changed': False,
}
# check if service exists
if not os.path.exists(init_script):
module.fail_json(msg='service %s does not exist' % service)
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
# do we need to enable the service?
enabled = is_enabled()
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s" % (init_script, action))
# openwrt init scripts can return a non-zero exit code on a successful 'enable'
# command if the init script doesn't contain a STOP value, so we ignore the exit
# code and explicitly check if the service is now in the desired state
if is_enabled() != module.params['enabled']:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
result['enabled'] = not enabled
if module.params['state'] is not None:
running = False
# check if service is currently running
if module.params['pattern']:
# Find ps binary
psbin = module.get_bin_path('ps', True)
# this should be busybox ps, so we only want/need to the 'w' option
(rc, psout, pserr) = module.run_command('%s w' % psbin)
# If rc is 0, set running as appropriate
if rc == 0:
lines = psout.split("\n")
for line in lines:
                    if module.params['pattern'] in line and "pattern=" not in line:
# so as to not confuse ./hacking/test-module
running = True
break
else:
(rc, out, err) = module.run_command("%s running" % init_script)
if rc == 0:
running = True
# default to desired state
result['state'] = module.params['state']
# determine action, if any
action = None
if module.params['state'] == 'started':
if not running:
action = 'start'
result['changed'] = True
elif module.params['state'] == 'stopped':
if running:
action = 'stop'
result['changed'] = True
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
result['changed'] = True
if action:
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s" % (init_script, action))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,408,766,062,804,798,000 | 30.632353 | 106 | 0.562839 | false |
orbro/eth-enablers | ethereum-datetime-master/tests/datetime/test_second.py | 2 | 1724 | import pytest
@pytest.mark.parametrize(
'timestamp,second',
(
(63071999, 59),
(63072000, 0),
(63072001, 1),
(63072002, 2),
(63072003, 3),
(63072004, 4),
(63072005, 5),
(63072006, 6),
(63072007, 7),
(63072008, 8),
(63072009, 9),
(63072010, 10),
(63072011, 11),
(63072012, 12),
(63072013, 13),
(63072014, 14),
(63072015, 15),
(63072016, 16),
(63072017, 17),
(63072018, 18),
(63072019, 19),
(63072020, 20),
(63072021, 21),
(63072022, 22),
(63072023, 23),
(63072024, 24),
(63072025, 25),
(63072026, 26),
(63072027, 27),
(63072028, 28),
(63072029, 29),
(63072030, 30),
(63072031, 31),
(63072032, 32),
(63072033, 33),
(63072034, 34),
(63072035, 35),
(63072036, 36),
(63072037, 37),
(63072038, 38),
(63072039, 39),
(63072040, 40),
(63072041, 41),
(63072042, 42),
(63072043, 43),
(63072044, 44),
(63072045, 45),
(63072046, 46),
(63072047, 47),
(63072048, 48),
(63072049, 49),
(63072050, 50),
(63072051, 51),
(63072052, 52),
(63072053, 53),
(63072054, 54),
(63072055, 55),
(63072056, 56),
(63072057, 57),
(63072058, 58),
(63072059, 59),
(63072060, 0),
),
)
def test_get_second_from_timestamp(deployed_contracts, timestamp, second):
crontab = deployed_contracts.DateTime
assert crontab.getSecond(timestamp) == second
| gpl-3.0 | 3,559,308,486,806,880,000 | 22.944444 | 74 | 0.456497 | false |
crennui/Cloud-On-Key | DataBaseFiles.py | 1 | 6414 | #---------------------------------IMPORTS-------------------------------------
import random
import sqlite3
from string import letters
import os
import pythoncom
import win32com.client
import mammoth
#------------------------------CONSTANTS---------------------------------------
FILES_PATH = os.path.dirname(os.path.abspath(__file__))+"/files/"
CREATE_PERMISSIONS = '''CREATE TABLE permissions
(user_id TEXT, user_file_name TEXT ,server_file_name TEXT, permission_type TEXT, owner TEXT)'''
#-------------------------------------------------------------------------
class DataBaseFiles():
def __init__(self):
self.table_path = "users/files.db"
self.conn = sqlite3.connect(self.table_path, check_same_thread=False)
self.c = self.conn.cursor()
def create_table(self):
"""
The function creates a table by the name files in the table_path file.
"""
try:
self.c.execute('''CREATE TABLE files (owner TEXT, user_file_name TEXT, server_file_name TEXT)''')
return True
except Exception:
return False
def create_permissions_table(self):
"""
The function creates a table by the name permissions in the table_path file.
"""
try:
self.c.execute(CREATE_PERMISSIONS)
return True
except Exception:
return False
def add_permission(self, user_id, user_file_name, server_file_name, permission_type, owner):
t = (user_id, user_file_name, server_file_name, permission_type, owner)
self.c.execute("INSERT INTO permissions VALUES(?,?,?,?,?)", t)
self.conn.commit()
def insert_file(self, owner, user_file_name, data=""):
"""
        The function adds a new file to the database; if the user_file_name is
        already used, the function returns False.
"""
try:
server_file_name = self.generate_name() + "." + user_file_name.split(".")[1]
print server_file_name
t = (owner, user_file_name, server_file_name)
self.c.execute("INSERT INTO files VALUES (?,?,?)", t)
self.conn.commit()
f = open("files/"+server_file_name, "w")
f.write(data)
f.close()
return True
except Exception:
return False
def print_table(self):
self.c.execute("SELECT * FROM files")
data = self.c.fetchall()
print data
def delete_files_table(self):
self.c.execute("DROP table if exists files")
self.conn.commit()
def delete_permission_table(self):
self.c.execute("DROP table if exists permissions")
self.conn.commit()
def delete_file(self, server_file_name, user_id):
t = (server_file_name, user_id)
self.c.execute('DELETE FROM files WHERE server_file_name=? AND owner=?', t)
self.c.execute('DELETE FROM permissions WHERE server_file_name=? AND user_id=?', t)
os.remove(FILES_PATH+server_file_name)
self.conn.commit()
return True
def get_files_by_owner_id(self, owner):
"""
returns a list of the files the user owned (The names that the user gave).
None if no files are found.
"""
t = (owner,)
return self.c.execute('SELECT * FROM files WHERE owner=?', t).fetchall()
def generate_name(self):
"""
The function returns a random name of a file.
The name has to start with a letter, and then a number between 0 and 1000.
The name can only appear once in the server.
"""
server_file_name = random.choice(letters)
server_file_name += str(random.randint(0, 1000))
s_f_n = (server_file_name,)
while self.c.execute('SELECT * FROM files WHERE server_file_name=?', s_f_n).fetchone():
server_file_name = random.choice(letters)
            server_file_name += str(random.randint(0, 1000))
s_f_n = (server_file_name,)
return server_file_name
def get_user_files_list(self, user_id):
"""
        Returns a list of the user_file_name values for all files the user
        owns or that are shared with the user.
"""
t = (user_id, )
list_of_files = self.c.execute("SELECT * FROM files WHERE owner=?", t).fetchall()
list_of_files_not_owned = self.c.execute("SELECT * FROM permissions WHERE user_id=?", t).fetchall()
return [file_name[1] for file_name in list_of_files] + [file_name[1] for file_name in list_of_files_not_owned]
def user_to_server_file_name_owned(self, user_file_name, user_id):
t = (user_file_name, user_id)
k = self.c.execute("SELECT server_file_name FROM files WHERE user_file_name=? AND owner=?", t).fetchone()
print k
return k
def user_to_server_file_name_not_owned(self, user_file_name, user_id):
t = (user_file_name, user_id)
k = self.c.execute("SELECT server_file_name FROM permissions WHERE user_file_name=? AND user_id=?", t).fetchone()
print str(k[0]) + "premmmmmm"
return k
def html_to_word(self, server_file_name, user_file_name):
pythoncom.CoInitialize()
word = win32com.client.Dispatch('Word.Application')
doc = word.Documents.Add(FILES_PATH+server_file_name)
print FILES_PATH+user_file_name.split(".")[0]+'.docx'
doc.SaveAs2(FILES_PATH+user_file_name.split(".")[0]+'.docx', FileFormat=12)
doc.Close()
word.Quit()
return True
def word_to_html(self, owner, new_file_name):
with open(FILES_PATH+new_file_name, "rb") as docx_file:
result = mammoth.convert_to_html(docx_file)
html = result.value
#messages = result.messages
self.insert_file(owner, new_file_name.replace(".docx", ".txt"), html)
os.remove("files/"+new_file_name)
print html
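# --- Hedged usage sketch (editor addition, not part of the original module) ---
# A minimal illustration of how DataBaseFiles is meant to be used. It assumes
# the process runs from the project root so that "users/files.db" and the
# files/ directory exist; the owner id "1" and the file name "notes.txt" are
# hypothetical values chosen only for this example.
def example_usage():
    db = DataBaseFiles()
    db.create_table()               # returns False if the table already exists
    db.create_permissions_table()
    db.insert_file("1", "notes.txt", "hello world")   # stored under a random server name
    return db.get_user_files_list("1")                # e.g. ["notes.txt", ...]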
def reset_tables():
db = DataBaseFiles()
db.delete_files_table()
db.delete_permission_table()
db.create_table()
db.create_permissions_table()
#db.add_permission("33", "hello.txt", "j154.txt", "r", "36")
db.print_table()
if __name__ == "__main__":
#reset_tables()
#db.html_to_word("g74.txt", "madara.txt")
db = DataBaseFiles()
db.delete_permission_table()
db.create_permissions_table()
a="""c.execute("DROP table if exists users")
conn.commit()
printing()""" | mit | 5,939,270,535,538,892,000 | 36.95858 | 121 | 0.583567 | false |
synedra/api-design-with-apiaryio-python-and-flask | app.py | 2 | 2811 | #!flask/bin/python
from flask import Flask, jsonify, abort, make_response, request
app = Flask(__name__)
folders = [
{
"id": 1,
"name": "Health",
"description": "This represents projects that are related to health",
"parent": 0,
"meta": "NULL"
},
{
"id": 2,
"name": "Diet",
"description": "A collection of projects related to Diet",
"parent": 1,
"meta": "NULL"
}
]
@app.route('/folder/<int:folder_id>', methods = ['GET'])
def get_folder(folder_id):
folder = filter(lambda t: t['id'] == folder_id, folders)
if len(folder) == 0:
abort(404)
return jsonify( folder[0] )
@app.route('/folder', methods = ['GET'])
def get_folders():
return jsonify( { 'folders': folders } )
@app.route('/folder', methods = ['POST'])
def create_folder():
if (not request.json) or (not 'name' in request.json) or (not 'description' in request.json):
abort(400)
folder = {
'id': folders[-1]['id'] + 1,
'name': request.json['name'],
'description': request.json['description'],
'parent': request.json['parent'],
'meta': request.json.get('meta', "NULL")
}
folders.append(folder)
return jsonify( folder ), 201
@app.route('/folder/<int:folder_id>', methods = ['PATCH'])
def update_folder(folder_id):
folder = filter(lambda t: t['id'] == folder_id, folders)
if len(folder) == 0:
abort(404)
if not request.json:
abort(400)
if 'name' in request.json and type(request.json['name']) != unicode:
abort(400)
if 'description' in request.json and type(request.json['description']) is not unicode:
abort(400)
if 'parent' in request.json and type(request.json['parent']) is not int:
abort(400)
if 'meta' in request.json and type(request.json['meta']) is not str:
abort(400)
folder[0]['name'] = request.json.get('name', folder[0]['name'])
folder[0]['description'] = request.json.get('description', folder[0]['description'])
folder[0]['parent'] = request.json.get('parent', folder[0]['parent'])
folder[0]['meta'] = request.json.get('meta', folder[0]['meta'])
return jsonify( folder[0] )
@app.route('/folder/<int:folder_id>', methods = ['DELETE'])
def delete_folder(folder_id):
folder = filter(lambda t: t['id'] == folder_id, folders)
if len(folder) == 0:
abort(404)
folders.remove(folder[0])
return jsonify( { "result": True } )
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify( { "error": "Resource not found" } ), 404)
@app.errorhandler(400)
def create_failed(error):
return make_response(jsonify( { "error": "Resource modification failed" } ), 400)
if __name__ == '__main__':
app.run(debug = True)
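# --- Hedged usage sketch (editor addition, not part of the original app) ---
# Flask's built-in test client exercises the API without starting a server.
# The folder payload below is a hypothetical example value.
def example_requests():
    import json
    client = app.test_client()
    listing = client.get('/folder')   # JSON body: {"folders": [...]}
    created = client.post(
        '/folder',
        data=json.dumps({'name': 'Fitness', 'description': 'Workout projects', 'parent': 1}),
        content_type='application/json')   # expected status 201 on success
    return listing, created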
| mit | -2,258,629,987,339,273,700 | 31.686047 | 97 | 0.595162 | false |
mardom/GalSim | examples/demo10.py | 1 | 11798 | # Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""
Demo #10
The tenth script in our tutorial about using GalSim in python scripts: examples/demo*.py.
(This file is designed to be viewed in a window 100 characters wide.)
This script uses both a variable PSF and variable shear, taken from a power spectrum, along
the lines of a Great10 (Kitching, et al, 2012) image. The galaxies are placed on a grid
(10 x 10 in this case, rather than 100 x 100 in the interest of time.) Each postage stamp
is 48 x 48 pixels. Instead of putting the PSF images on a separate image, we package them
as the second HDU in the file. For the galaxies, we use a random selection from 5 specific
RealGalaxy objects, selected to be 5 particularly irregular ones. (These are taken from
the same catalog of 100 objects that demo6 used.) The galaxies are oriented in a ring
test (Nakajima & Bernstein 2007) of 20 each.
New features introduced in this demo:
- rng = galsim.BaseDeviate(seed)
- obj = galsim.RealGalaxy(real_galaxy_catalog, id)
- obj = galsim.Convolve([list], real_space)
- ps = galsim.PowerSpectrum(e_power_function, b_power_function)
- g1,g2 = ps.buildGrid(grid_spacing, ngrid, rng)
- g1,g2 = ps.getShear(pos)
- galsim.random.permute(rng, list1, list2, ...)
- Choosing PSF parameters as a function of (x,y)
- Selecting RealGalaxy by ID rather than index.
- Putting the PSF image in a second HDU in the same file as the main image.
- Using PowerSpectrum for the applied shear.
- Doing a full ring test (i.e. not just 90 degree rotated pairs)
"""
import sys
import os
import math
import numpy
import logging
import time
import galsim
def main(argv):
"""
Make images using variable PSF and shear:
- The main image is 10 x 10 postage stamps.
- Each postage stamp is 48 x 48 pixels.
- The second HDU has the corresponding PSF image.
- Applied shear is from a power spectrum P(k) ~ k^1.8.
- Galaxies are real galaxies oriented in a ring test of 20 each.
- The PSF is Gaussian with FWHM, ellipticity and position angle functions of (x,y)
- Noise is Poisson using a nominal sky value of 1.e6.
"""
logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger("demo10")
# Define some parameters we'll use below.
# Normally these would be read in from some parameter file.
n_tiles = 10 # number of tiles in each direction.
stamp_size = 48 # pixels
pixel_scale = 0.44 # arcsec / pixel
sky_level = 1.e6 # ADU / arcsec^2
# The random seed is used for both the power spectrum realization and the random properties
# of the galaxies.
random_seed = 3339201
# Make output directory if not already present.
if not os.path.isdir('output'):
os.mkdir('output')
file_name = os.path.join('output','power_spectrum.fits')
# These will be created for each object below. The values we'll use will be functions
# of (x,y) relative to the center of the image. (r = sqrt(x^2+y^2))
# psf_fwhm = 0.9 + 0.5 * (r/100)^2 -- arcsec
# psf_e = 0.4 * (r/100)^1.5 -- large value at the edge, so visible by eye.
# psf_beta = atan2(y/x) + pi/2 -- tangential pattern
gal_signal_to_noise = 100 # Great08 "LowNoise" run
gal_dilation = 3 # Make the galaxies a bit larger than their original size.
logger.info('Starting demo script 10')
# Read in galaxy catalog
cat_file_name = 'real_galaxy_catalog_example.fits'
# This script is designed to be run from the examples directory so dir is a relative path.
# But the '../examples/' part lets bin/demo10 also be run from the bin directory.
dir = '../examples/data'
real_galaxy_catalog = galsim.RealGalaxyCatalog(cat_file_name, dir=dir)
real_galaxy_catalog.preload()
logger.info('Read in %d real galaxies from catalog', real_galaxy_catalog.nobjects)
# List of IDs to use. We select 5 particularly irregular galaxies for this demo.
# Then we'll choose randomly from this list.
id_list = [ 106416, 106731, 108402, 116045, 116448 ]
# Make the 5 galaxies we're going to use here rather than remake them each time.
# This means the Fourier transforms of the real galaxy images don't need to be recalculated
# each time, so it's a bit more efficient.
gal_list = [ galsim.RealGalaxy(real_galaxy_catalog, id=id) for id in id_list ]
# Make the galaxies a bit larger than their original observed size.
for gal in gal_list:
gal.applyDilation(gal_dilation)
# Setup the PowerSpectrum object we'll be using:
ps = galsim.PowerSpectrum(lambda k : k**1.8)
# The argument here is "e_power_function" which defines the E-mode power to use.
# There is also a b_power_function if you want to include any B-mode power:
# ps = galsim.PowerSpectrum(e_power_function, b_power_function)
# You may even omit the e_power_function argument and have a pure B-mode power spectrum.
# ps = galsim.PowerSpectrum(b_power_function = b_power_function)
# All the random number generator classes derive from BaseDeviate.
# When we construct another kind of deviate class from any other
# kind of deviate class, the two share the same underlying random number
# generator. Sometimes it can be clearer to just construct a BaseDeviate
# explicitly and then construct anything else you need from that.
# Note: A BaseDeviate cannot be used to generate any values. It can
# only be used in the constructor for other kinds of deviates.
# The seeds for the objects are random_seed..random_seed+nobj-1 (which comes later),
# so use the next one.
nobj = n_tiles * n_tiles
rng = galsim.BaseDeviate(random_seed+nobj)
# Now have the PowerSpectrum object build a grid of shear values for us to use.
grid_g1, grid_g2 = ps.buildGrid(grid_spacing=stamp_size*pixel_scale, ngrid=n_tiles, rng=rng)
# Setup the images:
gal_image = galsim.ImageF(stamp_size * n_tiles , stamp_size * n_tiles, scale=pixel_scale)
psf_image = galsim.ImageF(stamp_size * n_tiles , stamp_size * n_tiles, scale=pixel_scale)
im_center = gal_image.bounds.trueCenter()
# We will place the tiles in a random order. To do this, we make two lists for the
# ix and iy values. Then we apply a random permutation to the lists (in tandem).
ix_list = []
iy_list = []
for ix in range(n_tiles):
for iy in range(n_tiles):
ix_list.append(ix)
iy_list.append(iy)
# This next function will use the given random number generator, rng, and use it to
# randomly permute any number of lists. All lists will have the same random permutation
# applied.
galsim.random.permute(rng, ix_list, iy_list)
# Build each postage stamp:
for k in range(nobj):
# The usual random number generator using a different seed for each galaxy.
rng = galsim.BaseDeviate(random_seed+k)
# Determine the bounds for this stamp and its center position.
ix = ix_list[k]
iy = iy_list[k]
b = galsim.BoundsI(ix*stamp_size+1 , (ix+1)*stamp_size,
iy*stamp_size+1 , (iy+1)*stamp_size)
sub_gal_image = gal_image[b]
sub_psf_image = psf_image[b]
pos = b.trueCenter() - im_center
pos = galsim.PositionD(pos.x * pixel_scale , pos.y * pixel_scale)
# The image comes out as about 211 arcsec across, so we define our variable
# parameters in terms of (r/100 arcsec), so roughly the scale size of the image.
r = math.sqrt(pos.x**2 + pos.y**2) / 100
psf_fwhm = 0.9 + 0.5 * r**2 # arcsec
psf_e = 0.4 * r**1.5
psf_beta = (math.atan2(pos.y,pos.x) + math.pi/2) * galsim.radians
# Define the PSF profile
psf = galsim.Gaussian(fwhm=psf_fwhm)
psf.applyShear(e=psf_e, beta=psf_beta)
# Define the pixel
pix = galsim.Pixel(pixel_scale)
# Define the galaxy profile:
# For this demo, we are doing a ring test where the same galaxy profile is drawn at many
# orientations stepped uniformly in angle, making a ring in e1-e2 space.
# We're drawing each profile at 20 different orientations and then skipping to the
# next galaxy in the list. So theta steps by 1/20 * 360 degrees:
theta = k/20. * 360. * galsim.degrees
# The index needs to increment every 20 objects so we use k/20 using integer math.
index = k / 20
gal = gal_list[index]
# This makes a new copy so we're not changing the object in the gal_list.
gal = gal.createRotated(theta)
# Apply the shear from the power spectrum. We should either turn the gridded shears
# grid_g1[iy, ix] and grid_g2[iy, ix] into gridded reduced shears using a utility called
# galsim.lensing.theoryToObserved, or use ps.getShear() which by default gets the reduced
# shear. ps.getShear() is also more flexible because it can get the shear at positions that
# are not on the original grid, as long as they are contained within the bounds of the full
# grid. So in this example we'll use ps.getShear().
alt_g1,alt_g2 = ps.getShear(pos)
gal.applyShear(g1=alt_g1, g2=alt_g2)
# Apply half-pixel shift in a random direction.
shift_r = pixel_scale * 0.5
ud = galsim.UniformDeviate(rng)
theta = ud() * 2. * math.pi
dx = shift_r * math.cos(theta)
dy = shift_r * math.sin(theta)
gal.applyShift(dx,dy)
# Make the final image, convolving with psf and pix
final = galsim.Convolve([psf,pix,gal])
# Draw the image
final.draw(sub_gal_image)
# Now add noise to get our desired S/N
# See demo5.py for more info about how this works.
sky_level_pixel = sky_level * pixel_scale**2
noise = galsim.PoissonNoise(rng, sky_level=sky_level_pixel)
sub_gal_image.addNoiseSNR(noise, gal_signal_to_noise)
# Draw the PSF image:
# We use real space convolution to avoid some of the
# artifacts that can show up with Fourier convolution.
# The level of the artifacts is quite low, but when drawing with
# no noise, they are apparent with ds9's zscale viewing.
final_psf = galsim.Convolve([psf,pix], real_space=True)
# For the PSF image, we also shift the PSF by the same amount.
final_psf.applyShift(dx,dy)
# No noise on PSF images. Just draw it as is.
final_psf.draw(sub_psf_image)
logger.info('Galaxy (%d,%d): position relative to center = %s', ix,iy,str(pos))
logger.info('Done making images of postage stamps')
# Now write the images to disk.
images = [ gal_image , psf_image ]
galsim.fits.writeMulti(images, file_name)
logger.info('Wrote image to %r',file_name)
if __name__ == "__main__":
main(sys.argv)
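# --- Hedged post-processing sketch (editor addition, not part of the demo) ---
# The multi-extension FITS written above can be inspected with astropy (an
# assumption: astropy is not required by GalSim). Per the demo's description,
# the galaxy image comes first and the PSF image is the second HDU; adjust the
# indices if your GalSim version writes an empty primary HDU first.
def inspect_output(file_name='output/power_spectrum.fits'):
    from astropy.io import fits
    with fits.open(file_name) as hdus:
        gal_data = hdus[0].data   # 480 x 480 tiled galaxy image
        psf_data = hdus[1].data   # matching PSF image
    return gal_data.shape, psf_data.shape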
| gpl-3.0 | 693,719,504,685,542,000 | 43.353383 | 100 | 0.668842 | false |
zverevalexei/trex-http-proxy | trex_client/external_libs/jsonrpclib-pelix-0.2.5/setup.py | 5 | 2518 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Installation script
:authors: Josh Marshall, Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.2.5
..
Copyright 2015 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 2, 5)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# ------------------------------------------------------------------------------
setup(
name="jsonrpclib-pelix",
version=__version__,
license="Apache License 2.0",
author="Thomas Calmant",
author_email="[email protected]",
url="http://github.com/tcalmant/jsonrpclib/",
description=
"This project is an implementation of the JSON-RPC v2.0 specification "
"(backwards-compatible) as a client library, for Python 2.6+ and Python 3."
"This version is a fork of jsonrpclib by Josh Marshall, "
"usable with Pelix remote services.",
long_description=open("README.rst").read(),
packages=["jsonrpclib"],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
tests_require=['unittest2'] if sys.version_info < (2, 7) else []
)
| mit | 7,261,472,671,894,225,000 | 33.027027 | 80 | 0.626291 | false |
prarthitm/edxplatform | openedx/core/djangoapps/external_auth/tests/test_ssl.py | 1 | 17658 | """
Provides unit tests for SSL based authentication portions
of the external_auth app.
"""
# pylint: disable=no-member
from contextlib import contextmanager
import copy
from mock import Mock, patch
from django.conf import settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
import openedx.core.djangoapps.external_auth.views as external_auth_views
from openedx.core.djangolib.testing.utils import skip_unless_cms, skip_unless_lms
from student.models import CourseEnrollment
from student.roles import CourseStaffRole
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP = FEATURES_WITH_SSL_AUTH.copy()
FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP['AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'] = True
FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE = FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP.copy()
FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
FEATURES_WITHOUT_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITHOUT_SSL_AUTH['AUTH_USE_CERTIFICATES'] = False
CACHES_ENABLE_GENERAL = copy.deepcopy(settings.CACHES)
CACHES_ENABLE_GENERAL['general']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache'
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH)
@override_settings(CACHES=CACHES_ENABLE_GENERAL)
class SSLClientTest(ModuleStoreTestCase):
"""
Tests SSL Authentication code sections of external_auth
"""
AUTH_DN = '/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}'
USER_NAME = 'test_user_ssl'
USER_EMAIL = '[email protected]'
MOCK_URL = '/'
@contextmanager
def _create_ssl_request(self, url):
"""Creates a basic request for SSL use."""
request = self.factory.get(url)
request.META['SSL_CLIENT_S_DN'] = self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
request.user = AnonymousUser()
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
with patch('edxmako.request_context.get_current_request', return_value=request):
yield request
@contextmanager
def _create_normal_request(self, url):
"""Creates sessioned request without SSL headers"""
request = self.factory.get(url)
request.user = AnonymousUser()
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
with patch('edxmako.request_context.get_current_request', return_value=request):
yield request
def setUp(self):
"""Setup test case by adding primary user."""
super(SSLClientTest, self).setUp()
self.client = Client()
self.factory = RequestFactory()
self.mock = Mock()
@skip_unless_lms
def test_ssl_login_with_signup_lms(self):
"""
Validate that an SSL login creates an eamap user and
redirects them to the signup page.
"""
with self._create_ssl_request('/') as request:
response = external_auth_views.ssl_login(request)
# Response should contain template for signup form, eamap should have user, and internal
# auth should not have a user
self.assertIn('<form role="form" id="register-form" method="post"', response.content)
try:
ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)
except ExternalAuthMap.DoesNotExist, ex:
self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))
with self.assertRaises(User.DoesNotExist):
User.objects.get(email=self.USER_EMAIL)
@skip_unless_cms
def test_ssl_login_with_signup_cms(self):
"""
Validate that an SSL login creates an eamap user and
redirects them to the signup page on CMS.
"""
self.client.get(
reverse('contentstore.views.login_page'),
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
)
try:
ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)
except ExternalAuthMap.DoesNotExist, ex:
self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))
with self.assertRaises(User.DoesNotExist):
User.objects.get(email=self.USER_EMAIL)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_ssl_login_without_signup_lms(self):
"""
Test IMMEDIATE_SIGNUP feature flag and ensure the user account is automatically created
and the user is redirected to slash.
"""
with self._create_ssl_request('/') as request:
external_auth_views.ssl_login(request)
# Assert our user exists in both eamap and Users, and that we are logged in
try:
ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)
except ExternalAuthMap.DoesNotExist, ex:
self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))
try:
User.objects.get(email=self.USER_EMAIL)
        except User.DoesNotExist, ex:
self.fail('User did not get properly added to internal users, exception was {0}'.format(str(ex)))
@skip_unless_cms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_ssl_login_without_signup_cms(self):
"""
Test IMMEDIATE_SIGNUP feature flag and ensure the user account is
automatically created on CMS, and that we are redirected
to courses.
"""
response = self.client.get(
reverse('contentstore.views.login_page'),
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
)
self.assertEqual(response.status_code, 302)
self.assertIn('/course', response['location'])
# Assert our user exists in both eamap and Users, and that we are logged in
try:
ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)
except ExternalAuthMap.DoesNotExist, ex:
self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))
try:
User.objects.get(email=self.USER_EMAIL)
        except User.DoesNotExist, ex:
self.fail('User did not get properly added to internal users, exception was {0}'.format(str(ex)))
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_default_login_decorator_ssl(self):
"""
Make sure that SSL login happens if it is enabled on protected
views instead of showing the login form.
"""
        response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('signin_user'), response['location'])
response = self.client.get(
reverse('dashboard'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
self.assertEquals(('http://testserver/dashboard', 302),
response.redirect_chain[-1])
self.assertIn(SESSION_KEY, self.client.session)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_registration_page_bypass(self):
"""
This tests to make sure when immediate signup is on that
the user doesn't get presented with the registration page.
"""
response = self.client.get(
reverse('register_user'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
self.assertEquals(('http://testserver/dashboard', 302),
response.redirect_chain[-1])
self.assertIn(SESSION_KEY, self.client.session)
@skip_unless_cms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_cms_registration_page_bypass(self):
"""
This tests to make sure when immediate signup is on that
the user doesn't get presented with the registration page.
"""
response = self.client.get(
reverse('signup'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
)
self.assertEqual(response.status_code, 404)
# assert that we are logged in
self.assertIn(SESSION_KEY, self.client.session)
# Now that we are logged in, make sure we don't see the registration page
response = self.client.get(reverse('signup'), follow=True)
self.assertEqual(response.status_code, 404)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_signin_page_bypass(self):
"""
This tests to make sure when ssl authentication is on
that user doesn't get presented with the login page if they
have a certificate.
"""
# Test that they do signin if they don't have a cert
response = self.client.get(reverse('signin_user'))
self.assertEqual(200, response.status_code)
self.assertIn('login-and-registration-container', response.content)
# And get directly logged in otherwise
response = self.client.get(
reverse('signin_user'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
self.assertEquals(('http://testserver/dashboard', 302),
response.redirect_chain[-1])
self.assertIn(SESSION_KEY, self.client.session)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_ssl_bad_eamap(self):
"""
This tests the response when a user exists but their eamap
password doesn't match their internal password.
The internal password use for certificates has been removed
and this should not fail.
"""
# Create account, break internal password, and activate account
with self._create_ssl_request('/') as request:
external_auth_views.ssl_login(request)
user = User.objects.get(email=self.USER_EMAIL)
user.set_password('not autogenerated')
user.is_active = True
user.save()
# Make sure we can still login
self.client.get(
reverse('signin_user'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
self.assertIn(SESSION_KEY, self.client.session)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITHOUT_SSL_AUTH)
def test_ssl_decorator_no_certs(self):
"""Make sure no external auth happens without SSL enabled"""
dec_mock = external_auth_views.ssl_login_shortcut(self.mock)
with self._create_normal_request(self.MOCK_URL) as request:
request.user = AnonymousUser()
# Call decorated mock function to make sure it passes
# the call through without hitting the external_auth functions and
# thereby creating an external auth map object.
dec_mock(request)
self.assertTrue(self.mock.called)
self.assertEqual(0, len(ExternalAuthMap.objects.all()))
@skip_unless_lms
def test_ssl_login_decorator(self):
"""Create mock function to test ssl login decorator"""
dec_mock = external_auth_views.ssl_login_shortcut(self.mock)
# Test that anonymous without cert doesn't create authmap
with self._create_normal_request(self.MOCK_URL) as request:
dec_mock(request)
self.assertTrue(self.mock.called)
self.assertEqual(0, len(ExternalAuthMap.objects.all()))
# Test valid user
self.mock.reset_mock()
with self._create_ssl_request(self.MOCK_URL) as request:
dec_mock(request)
self.assertFalse(self.mock.called)
self.assertEqual(1, len(ExternalAuthMap.objects.all()))
# Test logged in user gets called
self.mock.reset_mock()
with self._create_ssl_request(self.MOCK_URL) as request:
request.user = UserFactory()
dec_mock(request)
self.assertTrue(self.mock.called)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
def test_ssl_decorator_auto_signup(self):
"""
Test that with auto signup the decorator
will bypass registration and call retfun.
"""
dec_mock = external_auth_views.ssl_login_shortcut(self.mock)
with self._create_ssl_request(self.MOCK_URL) as request:
dec_mock(request)
# Assert our user exists in both eamap and Users
try:
ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)
except ExternalAuthMap.DoesNotExist, ex:
self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))
try:
User.objects.get(email=self.USER_EMAIL)
        except User.DoesNotExist, ex:
self.fail('User did not get properly added to internal users, exception was {0}'.format(str(ex)))
self.assertEqual(1, len(ExternalAuthMap.objects.all()))
self.assertTrue(self.mock.called)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE)
def test_ssl_lms_redirection(self):
"""
Auto signup auth user and ensure they return to the original
url they visited after being logged in.
"""
course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course'
)
with self._create_ssl_request('/') as request:
external_auth_views.ssl_login(request)
user = User.objects.get(email=self.USER_EMAIL)
CourseEnrollment.enroll(user, course.id)
course_private_url = '/courses/MITx/999/Robot_Super_Course/courseware'
self.assertNotIn(SESSION_KEY, self.client.session)
response = self.client.get(
course_private_url,
follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL),
HTTP_ACCEPT='text/html'
)
self.assertEqual(('http://testserver{0}'.format(course_private_url), 302),
response.redirect_chain[-1])
self.assertIn(SESSION_KEY, self.client.session)
@skip_unless_cms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE)
def test_ssl_cms_redirection(self):
"""
Auto signup auth user and ensure they return to the original
url they visited after being logged in.
"""
course = CourseFactory.create(
org='MITx',
number='999',
display_name='Robot Super Course'
)
with self._create_ssl_request('/') as request:
external_auth_views.ssl_login(request)
user = User.objects.get(email=self.USER_EMAIL)
CourseEnrollment.enroll(user, course.id)
CourseStaffRole(course.id).add_users(user)
course_private_url = reverse('course_handler', args=(unicode(course.id),))
self.assertNotIn(SESSION_KEY, self.client.session)
response = self.client.get(
course_private_url,
follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL),
HTTP_ACCEPT='text/html'
)
self.assertEqual(('http://testserver{0}'.format(course_private_url), 302),
response.redirect_chain[-1])
self.assertIn(SESSION_KEY, self.client.session)
@skip_unless_lms
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE)
def test_ssl_logout(self):
"""
Because the branding view is cached for anonymous users and we
use that to login users, the browser wasn't actually making the
request to that view as the redirect was being cached. This caused
a redirect loop, and this test confirms that that won't happen.
Test is only in LMS because we don't use / in studio to login SSL users.
"""
response = self.client.get(
reverse('dashboard'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
self.assertEquals(('http://testserver/dashboard', 302),
response.redirect_chain[-1])
self.assertIn(SESSION_KEY, self.client.session)
response = self.client.get(
reverse('logout'), follow=True,
SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
)
# Make sure that even though we logged out, we have logged back in
self.assertIn(SESSION_KEY, self.client.session)
| agpl-3.0 | 3,430,336,830,766,060,000 | 41.244019 | 118 | 0.657209 | false |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/clouduseraccounts/alpha/resources.py | 6 | 1409 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://www.googleapis.com/clouduseraccounts/alpha/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
GLOBALACCOUNTSOPERATIONS = (
'globalAccountsOperations',
'projects/{project}/global/operations/{operation}',
{},
[u'project', u'operation']
)
GROUPS = (
'groups',
'projects/{project}/global/groups/{groupName}',
{},
[u'project', u'groupName']
)
USERS = (
'users',
'projects/{project}/global/users/{user}',
{},
[u'project', u'user']
)
def __init__(self, collection_name, path, flat_paths, params):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
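# --- Hedged usage sketch (editor addition) ---
# Each enum member carries a URI template; formatting it with concrete values
# yields the full resource URL. 'my-project' and 'alice' are placeholder
# values used only for illustration.
def example_user_path():
    return BASE_URL + Collections.USERS.path.format(project='my-project', user='alice')
    # -> 'https://www.googleapis.com/clouduseraccounts/alpha/projects/my-project/global/users/alice'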
| apache-2.0 | 5,275,429,883,453,584,000 | 28.354167 | 74 | 0.680625 | false |
daveferrara1/linkchecker | linkcheck/mem.py | 9 | 2697 | # -*- coding: iso-8859-1 -*-
# Copyright: Jean Brouwers
# License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Copied from the Python Cookbook recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/286222
To find the memory usage in a particular section of code these
functions are typically used as follows::
m0 = memory()
...
m1 = memory(m0)
"""
import os
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
def _VmB (VmKey):
"""Parse /proc/<pid>/status file for given key.
@return: requested number value of status entry
@rtype: float
"""
if os.name != 'posix':
# not supported
return 0.0
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except IOError:
# unsupported platform (non-Linux?)
return 0.0
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory (since=0.0):
"""Get memory usage.
@return: memory usage in bytes
@rtype: float
"""
return _VmB('VmSize:') - since
def resident (since=0.0):
"""Get resident memory usage.
@return: resident memory usage in bytes
@rtype: float
"""
return _VmB('VmRSS:') - since
def stacksize (since=0.0):
"""Get stack size.
@return: stack size in bytes
@rtype: float
"""
return _VmB('VmStk:') - since
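# --- Hedged usage sketch (editor addition; Linux-only, like the module) ---
# Mirrors the pattern from the module docstring: take a snapshot before a block
# of work, then measure the growth afterwards. The allocation is hypothetical.
def example_measurement():
    m0 = memory()
    data = [0] * 1000000        # some work whose memory cost we want to see
    m1 = memory(m0)             # bytes gained since m0 (0.0 on non-Linux)
    return m1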
| gpl-2.0 | -8,855,158,881,855,813,000 | 28.966667 | 79 | 0.662217 | false |
rtmux/rt-thread-lite | tools/tools/clang-analyze.py | 19 | 1998 | """
Tool-specific initialization for Clang static analyzer
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
__revision__ = "tools/clang-analyze.py 2013-09-06 grissiom"
import os
import os.path
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Tool
import SCons.Util
import rtconfig
def generate(env):
assert(rtconfig.CROSS_TOOL == 'clang-analyze')
# let gnu_tools setup a basic env(learnt from SCons/Tools/mingw.py)
gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas', 'm4']
for tool in gnu_tools:
SCons.Tool.Tool(tool)(env)
# then we could stand on the shoulders of gaints
env['CC'] = 'ccc-analyzer'
env['CXX'] = 'c++-analyzer'
env['AS'] = 'true'
env['AR'] = 'true'
env['LINK'] = 'true'
env['CFLAGS'] = ['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding']
env['LINKFLAGS'] = '-Wl,--gc-sections'
env['ARFLAGS'] = '-rc'
# only check, don't compile. ccc-analyzer use CCC_CC as the CC.
# fsyntax-only will give us some additional warning messages
env['ENV']['CCC_CC'] = 'clang'
env['ENV']['CCC_CXX'] = 'clang++'
# setup the output dir and format
env['ENV']['CCC_ANALYZER_HTML'] = './build/'
env['ENV']['CCC_ANALYZER_OUTPUT_FORMAT'] = 'html'
# Some setting from the platform also have to be overridden:
env['OBJSUFFIX'] = '.o'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
if rtconfig.EXEC_PATH:
if not os.path.exists(rtconfig.EXEC_PATH):
print
print 'warning: rtconfig.EXEC_PATH(%s) does not exists.' % rtconfig.EXEC_PATH
print
return
env.AppendENVPath('PATH', rtconfig.EXEC_PATH)
def exists(env):
return env.Detect(['ccc-analyzer', 'c++-analyzer'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | -5,962,454,063,972,973,000 | 27.956522 | 89 | 0.634134 | false |
testmana2/test | Plugins/VcsPlugins/vcsMercurial/GpgExtension/HgGpgSignDialog.py | 2 | 5172 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog to enter data for signing a revision.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from .Ui_HgGpgSignDialog import Ui_HgGpgSignDialog
class HgGpgSignDialog(QDialog, Ui_HgGpgSignDialog):
"""
Class implementing a dialog to enter data for signing a revision.
"""
def __init__(self, tagsList, branchesList, bookmarksList=None,
parent=None):
"""
Constructor
@param tagsList list of tags (list of strings)
@param branchesList list of branches (list of strings)
@param bookmarksList list of bookmarks (list of strings)
@param parent reference to the parent widget (QWidget)
"""
super(HgGpgSignDialog, self).__init__(parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.tagCombo.addItems(sorted(tagsList))
self.branchCombo.addItems(["default"] + sorted(branchesList))
if bookmarksList is not None:
self.bookmarkCombo.addItems(sorted(bookmarksList))
else:
self.bookmarkButton.setHidden(True)
self.bookmarkCombo.setHidden(True)
def __updateOK(self):
"""
Private slot to update the OK button.
"""
enabled = True
if self.idButton.isChecked():
enabled = enabled and self.idEdit.text() != ""
elif self.tagButton.isChecked():
enabled = enabled and self.tagCombo.currentText() != ""
elif self.branchButton.isChecked():
enabled = enabled and self.branchCombo.currentText() != ""
elif self.bookmarkButton.isChecked():
enabled = enabled and self.bookmarkCombo.currentText() != ""
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)
@pyqtSlot(bool)
def on_idButton_toggled(self, checked):
"""
Private slot to handle changes of the ID select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_tagButton_toggled(self, checked):
"""
Private slot to handle changes of the Tag select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_branchButton_toggled(self, checked):
"""
Private slot to handle changes of the Branch select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_bookmarkButton_toggled(self, checked):
"""
Private slot to handle changes of the Bookmark select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(str)
def on_idEdit_textChanged(self, txt):
"""
Private slot to handle changes of the ID edit.
@param txt text of the edit (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_tagCombo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Tag combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_branchCombo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Branch combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_bookmarkCombo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Bookmark combo.
@param txt text of the combo (string)
"""
self.__updateOK()
def getData(self):
"""
Public method to retrieve the entered data.
@return tuple giving the revision, a flag indicating not to commit
the signature, a commit message, an ID of the key to be used,
a flag indicating a local signature and a flag indicating a forced
signature (string, boolean, string, string, boolean, boolean)
"""
if self.numberButton.isChecked():
rev = "rev({0})".format(self.numberSpinBox.value())
elif self.idButton.isChecked():
rev = "id({0})".format(self.idEdit.text())
elif self.tagButton.isChecked():
rev = self.tagCombo.currentText()
elif self.branchButton.isChecked():
rev = self.branchCombo.currentText()
elif self.bookmarkButton.isChecked():
rev = self.bookmarkCombo.currentText()
else:
rev = ""
return (
rev,
self.nocommitCheckBox.isChecked(),
self.messageEdit.toPlainText(),
self.keyEdit.text(),
self.localCheckBox.isChecked(),
self.forceCheckBox.isChecked()
)
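# --- Hedged usage sketch (editor addition, not part of eric's sources) ---
# Typical dialog flow inside a running Qt application; the tag, branch and
# bookmark lists below are hypothetical example values.
def example_dialog(parent=None):
    dlg = HgGpgSignDialog(["v1.0", "v1.1"], ["feature-x"], ["work"], parent)
    if dlg.exec_() == QDialog.Accepted:
        rev, nocommit, message, keyid, local, force = dlg.getData()
        return rev, nocommit, message, keyid, local, force
    return None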
| gpl-3.0 | -5,592,240,277,841,571,000 | 30.925926 | 78 | 0.58894 | false |
unbit/sftpclone | sftpclone/t/stub_sftp.py | 1 | 6952 | #!/usr/bin/env python
# coding=utf-8
# Copyright (C) 2003-2009 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
A stub SFTP server for loopback SFTP testing.
"""
import os
from paramiko import ServerInterface, SFTPServerInterface, SFTPServer, SFTPAttributes, \
SFTPHandle, SFTP_OK, AUTH_SUCCESSFUL, AUTH_FAILED, OPEN_SUCCEEDED, RSAKey
from paramiko.common import o666
from sftpclone.t.utils import t_path
USERNAME = "test"
PASSWORD = "secret"
RSA_KEY = t_path("id_rsa")
SERVER_ROOT = "server_root"
class StubServer (ServerInterface):
good_pub_key = RSAKey(filename=RSA_KEY)
def check_auth_password(self, username, password):
if username == USERNAME and password == PASSWORD:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_publickey(self, username, key):
if username == USERNAME and key == self.good_pub_key:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_channel_request(self, kind, chanid):
return OPEN_SUCCEEDED
class StubSFTPHandle (SFTPHandle):
def stat(self):
try:
return SFTPAttributes.from_stat(os.fstat(self.readfile.fileno()))
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def chattr(self, attr):
# python doesn't have equivalents to fchown or fchmod, so we have to
# use the stored filename
try:
SFTPServer.set_file_attr(self.filename, attr)
return SFTP_OK
except OSError as e:
return SFTPServer.convert_errno(e.errno)
class StubSFTPServer (SFTPServerInterface):
ROOT = t_path(SERVER_ROOT)
def _realpath(self, path):
return self.ROOT + self.canonicalize(path)
def list_folder(self, path):
path = self._realpath(path)
try:
out = []
flist = os.listdir(path)
for fname in flist:
attr = SFTPAttributes.from_stat(
os.lstat(os.path.join(path, fname))
)
attr.filename = fname.encode("utf-8")
out.append(attr)
return out
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def stat(self, path):
path = self._realpath(path)
try:
return SFTPAttributes.from_stat(os.stat(path))
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def lstat(self, path):
path = self._realpath(path)
try:
return SFTPAttributes.from_stat(os.lstat(path))
except OSError as e:
return SFTPServer.convert_errno(e.errno)
def open(self, path, flags, attr):
path = self._realpath(path)
try:
binary_flag = getattr(os, 'O_BINARY', 0)
flags |= binary_flag
mode = getattr(attr, 'st_mode', None)
if mode is not None:
fd = os.open(path, flags, mode)
else:
# os.open() defaults to 0777 which is
# an odd default mode for files
fd = os.open(path, flags, o666)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
if (flags & os.O_CREAT) and (attr is not None):
attr._flags &= ~attr.FLAG_PERMISSIONS
SFTPServer.set_file_attr(path, attr)
if flags & os.O_WRONLY:
if flags & os.O_APPEND:
fstr = 'ab'
else:
fstr = 'wb'
elif flags & os.O_RDWR:
if flags & os.O_APPEND:
fstr = 'a+b'
else:
fstr = 'r+b'
else:
# O_RDONLY (== 0)
fstr = 'rb'
try:
f = os.fdopen(fd, fstr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
fobj = StubSFTPHandle(flags)
fobj.filename = path
fobj.readfile = f
fobj.writefile = f
return fobj
def remove(self, path):
path = self._realpath(path)
try:
os.remove(path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def rename(self, oldpath, newpath):
oldpath = self._realpath(oldpath)
newpath = self._realpath(newpath)
try:
os.rename(oldpath, newpath)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def mkdir(self, path, attr):
path = self._realpath(path)
try:
os.mkdir(path)
if attr is not None:
SFTPServer.set_file_attr(path, attr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def rmdir(self, path):
path = self._realpath(path)
try:
os.rmdir(path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def chattr(self, path, attr):
path = self._realpath(path)
try:
SFTPServer.set_file_attr(path, attr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def symlink(self, target_path, path):
path = self._realpath(path)
if (len(target_path) > 0) and (target_path[0] == '/'):
# absolute symlink
target_path = os.path.join(self.ROOT, target_path[1:])
try:
os.symlink(target_path, path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK
def readlink(self, path):
path = self._realpath(path)
try:
symlink = os.readlink(path)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
# if it's absolute, remove the root
if os.path.isabs(symlink):
if symlink[:len(self.ROOT)] == self.ROOT:
symlink = symlink[len(self.ROOT):]
if (len(symlink) == 0) or (symlink[0] != '/'):
symlink = '/' + symlink
else:
symlink = '<error>'
return symlink
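# --- Hedged wiring sketch (editor addition, not part of the original stub) ---
# One way to serve the stub over a loopback socket for tests. The socket setup
# and connection handling are assumptions; only the paramiko calls shown are
# its documented server-side API.
def example_serve(server_socket):
    import paramiko
    conn, _addr = server_socket.accept()
    transport = paramiko.Transport(conn)
    transport.add_server_key(RSAKey(filename=RSA_KEY))
    transport.set_subsystem_handler('sftp', paramiko.SFTPServer, StubSFTPServer)
    transport.start_server(server=StubServer())
    return transport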
| mit | 6,117,981,290,005,401,000 | 30.6 | 88 | 0.57897 | false |
kpj/SDEMotif | tests/test_reaction_finder.py | 1 | 12339 | from unittest import TestCase, skipIf
import io
from reaction_finder import *
class TestMatcher(TestCase):
def test_simple_match(self):
cgroups = {'foo': 3, 'bar': 1}
rspec = {'foo': 2, 'bar': 1}
self.assertTrue(match(cgroups, rspec))
def test_simple_mismatch(self):
cgroups = {'foo': 1, 'bar': 1}
rspec = {'foo': 2, 'bar': 1}
self.assertFalse(match(cgroups, rspec))
class TestPairFinder(TestCase):
def test_simple_finder(self):
cdata = {
'fooC': {'groups': {'H': 3, 'O': 2}},
'barC': {'groups': {'H': 4, 'O': 1}},
'bazC': {'groups': {'H': 0, 'O': 3}}
}
rdata = {
'rea1': {
'c1': {'H': 3, 'O': 2},
'c2': {'H': 4, 'O': 0},
'group_trans': {'H': -1, 'O': 2},
'mass_trans': 1
},
'rea2': {
'c1': {'H': 0, 'O': 1},
'c2': {'H': 1, 'O': 1},
'group_trans': {'H': 1, 'O': 0},
'mass_trans': 1
},
'rea3': {
'c1': {'H': 4, 'O': 0},
'c2': None,
'group_trans': {'H': -2, 'O': 1},
'mass_trans': 1
}
}
res = check_pair('fooC', 'barC', cdata, rdata)
self.assertEqual(sorted(res), ['rea1', 'rea2'])
res = check_pair('barC', 'fooC', cdata, rdata)
self.assertEqual(res, ['rea2'])
res = check_pair('fooC', 'bazC', cdata, rdata)
self.assertEqual(res, [])
res = check_pair('bazC', 'fooC', cdata, rdata)
self.assertEqual(res, ['rea2'])
res = check_pair('barC', 'bazC', cdata, rdata)
self.assertEqual(res, [])
res = check_pair('bazC', 'barC', cdata, rdata)
self.assertEqual(res, ['rea2'])
res = check_pair('barC', None, cdata, rdata)
self.assertEqual(res, ['rea3'])
class TestCompoundGuesser(TestCase):
def test_simple_generation(self):
cdata = {
'fooC': {'groups': {'H': 3, 'O': 2}, 'mass': 1, 'atoms': {'N': 1, 'C': 1}},
'barC': {'groups': {'H': 4, 'O': 1}, 'mass': 1, 'atoms': {'N': 3, 'C': 0}},
'bazC': {'groups': {'H': 0, 'O': 3}, 'mass': 1, 'atoms': {'N': 1, 'C': 2}}
}
rdata = {
'rea1': {
'c1': {'H': 3, 'O': 2},
'c2': {'H': 4, 'O': 0},
'group_trans': {'H': -1, 'O': 2},
'mass_trans': 1,
'atom_trans': {'c1': True, 'c2': True, 'N': 1, 'C': -2}
},
'rea2': {
'c1': {'H': 0, 'O': 1},
'c2': {'H': 1, 'O': 1},
'group_trans': {'H': 1, 'O': 0},
'mass_trans': 1,
'atom_trans': {'c1': True, 'c2': True, 'N': 0, 'C': 0}
}
}
combs = {
'rea1': [('fooC', 'barC')]
}
res = guess_new_compounds(combs, cdata, rdata)
self.assertTrue(len(res), 1)
self.assertEqual(
res['(fooC) {rea1} (barC)']['groups'],
{'H': 6, 'O': 5})
self.assertEqual(res['(fooC) {rea1} (barC)']['mass'], 3)
self.assertEqual(
res['(fooC) {rea1} (barC)']['atoms'],
{'N': 5, 'C': -1})
def test_guess_with_none(self):
cdata = {
'fooC': {
'groups': {'H': 3, 'O': 2},
'mass': 2,
'atoms': {'N': 4, 'C': 3}
},
}
rdata = {
'rea1': {
'c1': {'H': 3, 'O': 2},
'c2': None,
'group_trans': {'H': -2, 'O': 0},
'mass_trans': 1,
'atom_trans': {'c1': True, 'c2': False, 'N': -2, 'C': 1}
}
}
combs = {
'rea1': [('fooC', None)]
}
res = guess_new_compounds(combs, cdata, rdata)
self.assertTrue(len(res), 1)
self.assertEqual(
res['(fooC) {rea1} (None)']['groups'],
{'H': 1, 'O': 2})
self.assertEqual(res['(fooC) {rea1} (None)']['mass'], 3)
self.assertEqual(
res['(fooC) {rea1} (None)']['atoms'],
{'N': 2, 'C': 4})
def test_negative_group_number(self):
cdata = {
'fooC': {'groups': {'H': 1, 'O': 1}, 'mass': -4},
'barC': {'groups': {'H': 1, 'O': 1}, 'mass': -4},
}
rdata = {
'rea1': {
'c1': {'H': 0, 'O': 0},
'c2': {'H': 0, 'O': 0},
'group_trans': {'H': -4, 'O': -3},
'mass_trans': 6,
'atom_trans': {'c1': False, 'c2': False}
}
}
combs = {
'rea1': [('fooC', 'barC')]
}
res = guess_new_compounds(combs, cdata, rdata)
self.assertTrue(len(res), 1)
self.assertEqual(
res['(fooC) {rea1} (barC)']['groups'],
{'H': -2, 'O': -1})
self.assertEqual(res['(fooC) {rea1} (barC)']['mass'], -2)
class TestFileInput(TestCase):
def test_compound_reader(self):
data = read_compounds_file('./tests/data/compounds.csv')
self.assertEqual(len(data), 3)
self.assertEqual(
data['foo']['groups'], {
'-H': 1,
'-O': 4
})
self.assertEqual(
data['bar']['groups'], {
'-H': 5,
'-O': 0
})
self.assertEqual(
data['baz']['groups'], {
'-H': 3,
'-O': 3
})
self.assertEqual(
data['foo']['atoms'], {
'N': 0,
'C': 5
})
self.assertEqual(
data['bar']['atoms'], {
'N': 2,
'C': 3
})
self.assertEqual(
data['baz']['atoms'], {
'N': 1,
'C': 1
})
self.assertEqual(data['foo']['mass'], 1.5)
self.assertEqual(data['bar']['mass'], 1.7)
self.assertEqual(data['baz']['mass'], 1.1)
def test_reaction_reader(self):
data = read_reactions_file('./tests/data/reactions.csv')
self.assertEqual(len(data), 3)
self.assertEqual(
data['rea1'], {
'c1': {'-H': 3, '-O': 2},
'c2': {'-H': 4, '-O': 0},
'group_trans': {'-H': -1, '-O': 2},
'mass_trans': -1.1,
'atom_trans': {'c1': True, 'c2': True, 'H': -2, 'O': -1}
})
self.assertEqual(
data['rea2'], {
'c1': {'-H': 0, '-O': 1},
'c2': {'-H': 1, '-O': 1},
'group_trans': {'-H': 1, '-O': 0},
'mass_trans': 2.2,
'atom_trans': {'c1': True, 'c2': False, 'O': 1}
})
self.assertEqual(
data['rea3'], {
'c1': {'-H': 2, '-O': 3},
'c2': None,
'group_trans': {'-H': -2, '-O': -1},
'mass_trans': -3.3,
'atom_trans': {'c1': True, 'c2': False, 'H': -2, 'O': -1}
})
class IntegrationTest(TestCase):
def setUp(self):
self.compounds = io.StringIO("""Name,-H,-O,-N,M-H
c1,1,2,3,1.2
c2,2,1,3,2.3
""")
self.reactions = io.StringIO("""Reaction,Requirement Matrix - Compound 1,,,Requirement Matrix - Compound 2,,,Result Matrix,,,Transformation,Mass Addendum
,-H,-O,-N,-H,-O,-N,-H,-O,-N,,
r1, 1, 2, 3, 2, 1, 3, 1, 1,-6,,1.1
r2, 4, 4, 0, 1, 2, 0, 1, 0,-1,,-2.2
r3, 4, 4, 0, X, , , 0, 0, 1,,3.3
""")
def test_interactions(self):
comps = read_compounds_file(self.compounds)
reacts = read_reactions_file(self.reactions)
# first iteration
res = iterate_once(comps, reacts)
self.assertEqual(len(res), 1)
self.assertIn('(c1) {r1} (c2)', res)
self.assertEqual(
res['(c1) {r1} (c2)']['groups'],
{'-H': 4, '-O': 4, '-N': 0})
self.assertEqual(res['(c1) {r1} (c2)']['mass'], 4.6)
# second iteration
comps.update(res)
nres = iterate_once(comps, reacts)
self.assertEqual(len(nres), 4)
self.assertIn('(c1) {r1} (c2)', nres)
self.assertIn('((c1) {r1} (c2)) {r2} (c1)', nres)
self.assertIn('((c1) {r1} (c2)) {r3} (None)', nres)
self.assertIn('((c1) {r1} (c2)) {r2} ((c1) {r1} (c2))', nres)
self.assertEqual(
nres['(c1) {r1} (c2)']['groups'],
{'-H': 4, '-O': 4, '-N': 0})
self.assertEqual(
nres['((c1) {r1} (c2)) {r2} (c1)']['groups'],
{'-H': 6, '-O': 6, '-N': 2})
self.assertEqual(
nres['((c1) {r1} (c2)) {r3} (None)']['groups'],
{'-H': 4, '-O': 4, '-N': 1})
self.assertEqual(
nres['((c1) {r1} (c2)) {r2} ((c1) {r1} (c2))']['groups'],
{'-N': -1, '-O': 8, '-H': 9})
self.assertEqual(nres['(c1) {r1} (c2)']['mass'], 4.6)
self.assertAlmostEqual(nres['((c1) {r1} (c2)) {r2} (c1)']['mass'], 3.6)
class TestNameParser(TestCase):
def test_basic_name(self):
name = '(Caffeic acid) {C-C linkage} (Rhamnazin)'
c1, r, c2 = parse_compound_name(name)
self.assertEqual(c1, 'Caffeic acid')
self.assertEqual(r, 'C-C linkage')
self.assertEqual(c2, 'Rhamnazin')
def test_nested_name1(self):
name = '(Phloretin) {C-C linkage} ((Abscisic acid) {C-C linkage} (Aurantinidin))'
c1, r, c2 = parse_compound_name(name)
self.assertEqual(c1, 'Phloretin')
self.assertEqual(r, 'C-C linkage')
self.assertEqual(c2, '(Abscisic acid) {C-C linkage} (Aurantinidin)')
def test_nested_name2(self):
name = '((Abscisic acid) {C-C linkage} (Aurantinidin)) {Condensation - Ester Formation} (Eudesmic acid)'
c1, r, c2 = parse_compound_name(name)
self.assertEqual(c1, '(Abscisic acid) {C-C linkage} (Aurantinidin)')
self.assertEqual(r, 'Condensation - Ester Formation')
self.assertEqual(c2, 'Eudesmic acid')
def test_rwb_name(self):
name = '(Ononitol) {Oxidation - (O) Addition} (None)'
c1, r, c2 = parse_compound_name(name)
self.assertEqual(c1, 'Ononitol')
self.assertEqual(r, 'Oxidation - (O) Addition')
self.assertEqual(c2, 'None')
class TestAtomTransformationParser(TestCase):
def test_easy_case(self):
inp = 'M1 + M2 - H2O'
res = parse_atom_transformation(inp)
self.assertEqual(res, {
'c1': True,
'c2': True,
'H': -2,
'O': -1
})
def test_no_c2(self):
inp = 'M1 + H2'
res = parse_atom_transformation(inp)
self.assertEqual(res, {
'c1': True,
'c2': False,
'H': 2
})
class TestAssignmentPrediction(TestCase):
@skipIf('TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true', 'Skip on Travis CI.')
def test_simple_case(self):
motifs = [
('A', 'B', 'C')
]
data = {
'A': {'intensities': [[1,2,3]]},
'B': {'intensities': [[1,2,1]]},
'C': {'intensities': [[2,1,2]]}
}
tmp, _ = find_optimal_assignments(motifs, data, reps=1, null_model=False)
self.assertEqual(len(tmp), 1)
res = tmp[0]
self.assertEqual(res['A']['intensities'], [[1,2,3]])
self.assertEqual(res['B']['intensities'], [[1,2,1]])
self.assertEqual(res['C']['intensities'], [[2,1,2]])
@skipIf('TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true', 'Skip on Travis CI.')
def test_involved_case(self):
motifs = [
('A', 'B', 'C')
]
data = {
'A': {'intensities': [[1,2,3], [3,3,2]]},
'B': {'intensities': [[6,7,8]]},
'C': {'intensities': [[20,30,40]]}
}
tmp, _ = find_optimal_assignments(motifs, data, reps=1, null_model=False)
self.assertEqual(len(tmp), 1)
res = tmp[0]
self.assertEqual(res['A']['intensities'], [[1,2,3]])
self.assertEqual(res['B']['intensities'], [[6,7,8]])
self.assertEqual(res['C']['intensities'], [[20,30,40]])
| mit | -444,327,277,526,551,550 | 31.556728 | 161 | 0.426534 | false |
perlygatekeeper/glowing-robot | Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__init__.py | 19 | 1082 | from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
PROTOCOL_TLS,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)
from .wait import (
wait_for_read,
wait_for_write
)
__all__ = (
'HAS_SNI',
'IS_PYOPENSSL',
'IS_SECURETRANSPORT',
'SSLContext',
'PROTOCOL_TLS',
'Retry',
'Timeout',
'Url',
'assert_fingerprint',
'current_time',
'is_connection_dropped',
'is_fp_closed',
'get_host',
'parse_url',
'make_headers',
'resolve_cert_reqs',
'resolve_ssl_version',
'split_first',
'ssl_wrap_socket',
'wait_for_read',
'wait_for_write'
)
| artistic-2.0 | -796,773,465,658,617,600 | 18.321429 | 68 | 0.625693 | false |
bchappet/dnfpy | src/dnfpyUtils/cellular/diffusionLinCA.py | 1 | 1759 | # -*- coding: utf-8 -*-
'''
'''
import numpy as np
from dnfpy.cellular.cellularMap import CellularMap
import dnfpy.core.utils as utils
#python3 main.py --model ModelCellular --params "{'model':'DiffusionLinCA'}" --scenario None
class DiffusionLinCA(CellularMap):
"""
m : state max (when activated)
    pt : probability of transmitting the activation
activation : child (boolean) if true, cell is excited
obstacle : child (boolean) if true there is obstacle => cell is always 0
"""
def __init__(self,name,size,pt=1.0,m=100,activation=None,obstacle=None,**kwargs):
super().__init__(name=name,size=size,pt=pt,m=m,activation=activation,obstacle=obstacle,**kwargs)
def init(self,size):
return np.zeros((size,size),dtype=np.uint8)
def _compute(self,size,pt,m,activation,obstacle):
X = self._data
N = X[0:-2,1:-1]
W = X[1:-1,0:-2]
E = X[1:-1,2:]
S = X[2: ,1:-1]
x = X[1:-1,1:-1]
#random = np.random.random((size-2,size-2)) < pt
getN = (N > 0) & ((N != x+1) & (N != x-1))
getE = (E > 0) & ((E != x+1) & (E != x-1)) & (E != N)
getW = (W > 0) & ((W != x+1) & (W != x-1)) & (W != N) & (W != E)
getS = (S > 0) & ((S != x+1) & (S != x-1)) & (S != N) & (S != E) & (S != W)
x[getN] += N[getN] - 1
x[getE] += E[getE] - 1
x[getW] += W[getW] - 1
x[getS] += S[getS] - 1
#x[decrement] -= 1
if activation:
X[activation & (X==0)] = m
if obstacle:
X[obstacle] = 0
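    # Usage sketch (illustrative only, not part of the original module; the exact
    # constructor wiring depends on the CellularMap/dnfpy framework):
    #
    #   ca = DiffusionLinCA("diff", size=50, pt=1.0, m=100)
    #   ca.onClick(25, 25)   # seed the centre cell with the max state m
    #   # the framework then calls _compute(size, pt, m, activation, obstacle)
    #   # on every update step to diffuse the activation to the 4-neighbourhood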
def onClick(self,x,y):
size = self.getArg('size')
m = self.getArg("m")
self._data[y,x] = m
#U += utils.gauss2d(size,False,0.5,10,x,y)
| gpl-2.0 | -6,591,113,280,932,857,000 | 24.128571 | 104 | 0.502558 | false |
klahnakoski/SpotManager | vendor/mo_json/encoder.py | 1 | 16186 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
import json
import math
import time
from datetime import date, datetime, timedelta
from decimal import Decimal
from json.encoder import encode_basestring
from math import floor
from mo_dots import Data, FlatList, Null, NullType, SLOT, is_data, is_list, unwrap
from mo_future import PYPY, binary_type, is_binary, is_text, long, sort_using_key, text, utf8_json_encoder, xrange
from mo_json import ESCAPE_DCT, float2json, scrub
from mo_logs import Except
from mo_logs.strings import quote
from mo_times import Timer
from mo_times.dates import Date
from mo_times.durations import Duration
json_decoder = json.JSONDecoder().decode
_get = object.__getattribute__
_ = Except
# THIS FILE EXISTS TO SERVE AS A FAST REPLACEMENT FOR JSON ENCODING
# THE DEFAULT JSON ENCODERS CAN NOT HANDLE A DIVERSITY OF TYPES *AND* BE FAST
#
# 1) WHEN USING cPython, WE HAVE NO COMPILER OPTIMIZATIONS: THE BEST STRATEGY IS TO
# CONVERT THE MEMORY STRUCTURE TO STANDARD TYPES AND SEND TO THE INSANELY FAST
# DEFAULT JSON ENCODER
# 2) WHEN USING PYPY, WE USE CLEAR-AND-SIMPLE PROGRAMMING SO THE OPTIMIZER CAN DO
# ITS JOB. ALONG WITH THE UnicodeBuilder WE GET NEAR C SPEEDS
COMMA = u","
QUOTE = u'"'
COLON = u":"
QUOTE_COLON = QUOTE + COLON
COMMA_QUOTE = COMMA + QUOTE
PRETTY_COMMA = u", "
PRETTY_COLON = u": "
if PYPY:
# UnicodeBuilder IS ABOUT 2x FASTER THAN list()
from __pypy__.builders import UnicodeBuilder
else:
class UnicodeBuilder(list):
def __init__(self, length=None):
list.__init__(self)
def build(self):
return u"".join(self)
append = UnicodeBuilder.append
_dealing_with_problem = False
def pypy_json_encode(value, pretty=False):
"""
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
"""
global _dealing_with_problem
if pretty:
return pretty_json(value)
try:
_buffer = UnicodeBuilder(2048)
_value2json(value, _buffer)
output = _buffer.build()
return output
except Exception as e:
# THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
from mo_logs import Log
if _dealing_with_problem:
Log.error("Serialization of JSON problems", e)
else:
Log.warning("Serialization of JSON problems", e)
_dealing_with_problem = True
try:
return pretty_json(value)
except Exception as f:
Log.error("problem serializing object", f)
finally:
_dealing_with_problem = False
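# Usage sketch (illustrative, not part of the original module): callers normally go
# through the module-level `json_encoder` chosen at the bottom of this file, e.g.
#
#   from mo_json.encoder import json_encoder
#   text_out = json_encoder({"a": [1, 2, 3]})                 # compact JSON text
#   pretty_out = json_encoder({"a": [1, 2, 3]}, pretty=True)  # multi-line form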
class cPythonJSONEncoder(object):
def __init__(self, sort_keys=True):
object.__init__(self)
self.encoder = utf8_json_encoder
def encode(self, value, pretty=False):
if pretty:
return pretty_json(value)
try:
with Timer("scrub", too_long=0.1):
scrubbed = scrub(value)
param = {"size": 0}
with Timer("encode {{size}} characters", param=param, too_long=0.1):
output = text(self.encoder(scrubbed))
param["size"] = len(output)
return output
except Exception as e:
from mo_logs.exceptions import Except
from mo_logs import Log
e = Except.wrap(e)
Log.warning("problem serializing {{type}}", type=text(repr(value)), cause=e)
raise e
def ujson_encode(value, pretty=False):
if pretty:
return pretty_json(value)
try:
scrubbed = scrub(value)
return ujson_dumps(scrubbed, ensure_ascii=False, sort_keys=True, escape_forward_slashes=False).decode('utf8')
except Exception as e:
from mo_logs.exceptions import Except
from mo_logs import Log
e = Except.wrap(e)
Log.warning("problem serializing {{type}}", type=text(repr(value)), cause=e)
raise e
def _value2json(value, _buffer):
try:
_class = value.__class__
if value is None:
append(_buffer, u"null")
return
elif value is True:
append(_buffer, u"true")
return
elif value is False:
append(_buffer, u"false")
return
type = value.__class__
if type is binary_type:
append(_buffer, QUOTE)
try:
v = value.decode('utf8')
except Exception as e:
problem_serializing(value, e)
for c in v:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE)
elif type is text:
append(_buffer, QUOTE)
for c in value:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE)
elif type is dict:
if not value:
append(_buffer, u"{}")
else:
_dict2json(value, _buffer)
return
elif type is Data:
d = _get(value, SLOT) # MIGHT BE A VALUE NOT A DICT
_value2json(d, _buffer)
return
elif type in (int, long, Decimal):
append(_buffer, text(value))
elif type is float:
if math.isnan(value) or math.isinf(value):
append(_buffer, u'null')
else:
append(_buffer, float2json(value))
elif type in (set, list, tuple, FlatList):
_list2json(value, _buffer)
elif type is date:
append(_buffer, float2json(time.mktime(value.timetuple())))
elif type is datetime:
append(_buffer, float2json(time.mktime(value.timetuple())))
elif type is Date:
append(_buffer, float2json(value.unix))
elif type is timedelta:
append(_buffer, float2json(value.total_seconds()))
elif type is Duration:
append(_buffer, float2json(value.seconds))
elif type is NullType:
append(_buffer, u"null")
elif is_data(value):
if not value:
append(_buffer, u"{}")
else:
_dict2json(value, _buffer)
return
elif hasattr(value, '__data__'):
d = value.__data__()
_value2json(d, _buffer)
elif hasattr(value, '__json__'):
j = value.__json__()
append(_buffer, j)
elif hasattr(value, '__iter__'):
_iter2json(value, _buffer)
else:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable")
except Exception as e:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable", cause=e)
def _list2json(value, _buffer):
if not value:
append(_buffer, u"[]")
else:
sep = u"["
for v in value:
append(_buffer, sep)
sep = COMMA
_value2json(v, _buffer)
append(_buffer, u"]")
def _iter2json(value, _buffer):
append(_buffer, u"[")
sep = u""
for v in value:
append(_buffer, sep)
sep = COMMA
_value2json(v, _buffer)
append(_buffer, u"]")
def _dict2json(value, _buffer):
try:
prefix = u"{\""
for k, v in value.items():
append(_buffer, prefix)
prefix = COMMA_QUOTE
if is_binary(k):
k = k.decode('utf8')
for c in k:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE_COLON)
_value2json(v, _buffer)
append(_buffer, u"}")
except Exception as e:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable", cause=e)
ARRAY_ROW_LENGTH = 80
ARRAY_ITEM_MAX_LENGTH = 30
ARRAY_MAX_COLUMNS = 10
INDENT = " "
def pretty_json(value):
try:
if value is False:
return "false"
elif value is True:
return "true"
elif value == None:
return "null"
elif is_data(value):
try:
value = unwrap(value)
items = sort_using_key(value.items(), lambda r: r[0])
values = [encode_basestring(k) + PRETTY_COLON + pretty_json(v) for k, v in items if v != None]
if not values:
return "{}"
elif len(values) == 1:
return "{" + values[0] + "}"
else:
return "{\n" + ",\n".join(indent(v) for v in values) + "\n}"
except Exception as e:
from mo_logs import Log
from mo_math import OR
if OR(not is_text(k) for k in value.keys()):
Log.error(
"JSON must have string keys: {{keys}}:",
keys=[k for k in value.keys()],
cause=e
)
Log.error(
"problem making dict pretty: keys={{keys}}:",
keys=[k for k in value.keys()],
cause=e
)
elif value.__class__ in (binary_type, text):
if is_binary(value):
value = value.decode('utf8')
try:
if "\n" in value and value.strip():
return pretty_json({"$concat": value.split("\n"), "separator": "\n"})
else:
return quote(value)
except Exception as e:
from mo_logs import Log
try:
Log.note("try explicit convert of string with length {{length}}", length=len(value))
acc = [QUOTE]
for c in value:
try:
try:
c2 = ESCAPE_DCT[c]
except Exception:
c2 = c
c3 = text(c2)
acc.append(c3)
except BaseException:
pass
# Log.warning("odd character {{ord}} found in string. Ignored.", ord= ord(c)}, cause=g)
acc.append(QUOTE)
output = u"".join(acc)
Log.note("return value of length {{length}}", length=len(output))
return output
except BaseException as f:
Log.warning("can not convert {{type}} to json", type=f.__class__.__name__, cause=f)
return "null"
elif is_list(value):
if not value:
return "[]"
if ARRAY_MAX_COLUMNS == 1:
return "[\n" + ",\n".join([indent(pretty_json(v)) for v in value]) + "\n]"
if len(value) == 1:
j = pretty_json(value[0])
if j.find("\n") >= 0:
return "[\n" + indent(j) + "\n]"
else:
return "[" + j + "]"
js = [pretty_json(v) for v in value]
max_len = max(*[len(j) for j in js])
if max_len <= ARRAY_ITEM_MAX_LENGTH and max(*[j.find("\n") for j in js]) == -1:
# ALL TINY VALUES
num_columns = max(1, min(ARRAY_MAX_COLUMNS, int(floor((ARRAY_ROW_LENGTH + 2.0) / float(max_len + 2))))) # +2 TO COMPENSATE FOR COMMAS
if len(js) <= num_columns: # DO NOT ADD \n IF ONLY ONE ROW
return "[" + PRETTY_COMMA.join(js) + "]"
if num_columns == 1: # DO NOT rjust IF THERE IS ONLY ONE COLUMN
return "[\n" + ",\n".join([indent(pretty_json(v)) for v in value]) + "\n]"
content = ",\n".join(
PRETTY_COMMA.join(j.rjust(max_len) for j in js[r:r + num_columns])
for r in xrange(0, len(js), num_columns)
)
return "[\n" + indent(content) + "\n]"
pretty_list = js
output = ["[\n"]
for i, p in enumerate(pretty_list):
try:
if i > 0:
output.append(",\n")
output.append(indent(p))
except Exception:
from mo_logs import Log
Log.warning("problem concatenating string of length {{len1}} and {{len2}}",
len1=len("".join(output)),
len2=len(p)
)
output.append("\n]")
try:
return "".join(output)
except Exception as e:
from mo_logs import Log
Log.error("not expected", cause=e)
elif hasattr(value, '__data__'):
d = value.__data__()
return pretty_json(d)
elif hasattr(value, '__json__'):
j = value.__json__()
if j == None:
return " null " # TODO: FIND OUT WHAT CAUSES THIS
return pretty_json(json_decoder(j))
elif scrub(value) is None:
return "null"
elif hasattr(value, '__iter__'):
return pretty_json(list(value))
elif hasattr(value, '__call__'):
return "null"
else:
try:
if int(value) == value:
return text(int(value))
except Exception:
pass
try:
if float(value) == value:
return text(float(value))
except Exception:
pass
return pypy_json_encode(value)
except Exception as e:
problem_serializing(value, e)
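# Formatting sketch (illustrative, approximate output): dicts get sorted keys with one
# entry per line, while short homogeneous lists stay on a single row subject to
# ARRAY_ROW_LENGTH / ARRAY_ITEM_MAX_LENGTH / ARRAY_MAX_COLUMNS, e.g.
#
#   pretty_json({"b": [1, 2, 3], "a": "x"})
#   # roughly:
#   # {
#   #     "a": "x",
#   #     "b": [1, 2, 3]
#   # }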
def problem_serializing(value, e=None):
"""
THROW ERROR ABOUT SERIALIZING
"""
from mo_logs import Log
try:
typename = type(value).__name__
except Exception:
typename = "<error getting name>"
try:
rep = text(repr(value))
except Exception as _:
rep = None
if rep == None:
Log.error(
"Problem turning value of type {{type}} to json",
type=typename,
cause=e
)
else:
Log.error(
"Problem turning value ({{value}}) of type {{type}} to json",
value=rep,
type=typename,
cause=e
)
def indent(value, prefix=INDENT):
try:
content = value.rstrip()
suffix = value[len(content):]
lines = content.splitlines()
return prefix + (u"\n" + prefix).join(lines) + suffix
except Exception as e:
raise Exception(u"Problem with indent of value (" + e.message + u")\n" + value)
def value_compare(a, b):
if a == None:
if b == None:
return 0
return -1
elif b == None:
return 1
if a > b:
return 1
elif a < b:
return -1
else:
return 0
def datetime2milli(d, type):
try:
if type == datetime:
diff = d - datetime(1970, 1, 1)
else:
diff = d - date(1970, 1, 1)
return long(diff.total_seconds()) * long(1000) + long(diff.microseconds / 1000)
except Exception as e:
problem_serializing(d, e)
def unicode_key(key):
"""
CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME
"""
if not isinstance(key, (text, binary_type)):
from mo_logs import Log
Log.error("{{key|quote}} is not a valid key", key=key)
return quote(text(key))
# OH HUM, cPython with uJSON, OR pypy WITH BUILTIN JSON?
# http://liangnuren.wordpress.com/2012/08/13/python-json-performance/
# http://morepypy.blogspot.ca/2011/10/speeding-up-json-encoding-in-pypy.html
if PYPY:
json_encoder = pypy_json_encode
else:
# from ujson import dumps as ujson_dumps
# json_encoder = ujson_encode
json_encoder = cPythonJSONEncoder().encode
| mpl-2.0 | 9,049,147,450,840,627,000 | 30.551657 | 150 | 0.515878 | false |
zofuthan/edx-platform | common/lib/xmodule/xmodule/modulestore/__init__.py | 34 | 55201 | """
This module provides an abstraction for working with XModuleDescriptors
that are stored in a database and accessible using their Location as an identifier
"""
import logging
import re
import json
import datetime
from pytz import UTC
from collections import defaultdict
import collections
from contextlib import contextmanager
import threading
from operator import itemgetter
from sortedcontainers import SortedListWithKey
from abc import ABCMeta, abstractmethod
from contracts import contract, new_contract
from xblock.plugin import default_select
from .exceptions import InvalidLocationError, InsufficientSpecificationError
from xmodule.errortracker import make_error_tracker
from xmodule.assetstore import AssetMetadata
from opaque_keys.edx.keys import CourseKey, UsageKey, AssetKey
from opaque_keys.edx.locations import Location # For import backwards compatibility
from xblock.runtime import Mixologist
from xblock.core import XBlock
log = logging.getLogger('edx.modulestore')
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('XBlock', XBlock)
LIBRARY_ROOT = 'library.xml'
COURSE_ROOT = 'course.xml'
class ModuleStoreEnum(object):
"""
A class to encapsulate common constants that are used with the various modulestores.
"""
class Type(object):
"""
The various types of modulestores provided
"""
split = 'split'
mongo = 'mongo'
xml = 'xml'
class RevisionOption(object):
"""
Revision constants to use for Module Store operations
Note: These values are passed into store APIs and only used at run time
"""
# both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions
draft_preferred = 'rev-opt-draft-preferred'
# only DRAFT versions are queried and no PUBLISHED versions
draft_only = 'rev-opt-draft-only'
# # only PUBLISHED versions are queried and no DRAFT versions
published_only = 'rev-opt-published-only'
# all revisions are queried
all = 'rev-opt-all'
class Branch(object):
"""
Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED
Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps
"""
draft_preferred = 'draft-preferred'
published_only = 'published-only'
class BranchName(object):
"""
Branch constants to use for stores, such as Split, that have named branches
"""
draft = 'draft-branch'
published = 'published-branch'
library = 'library'
class UserID(object):
"""
Values for user ID defaults
"""
# Note: we use negative values here to (try to) not collide
# with user identifiers provided by actual user services.
# user ID to use for all management commands
mgmt_command = -1
# user ID to use for primitive commands
primitive_command = -2
# user ID to use for tests that do not have a django user available
test = -3
# user ID for automatic update by the system
system = -4
class SortOrder(object):
"""
Values for sorting asset metadata.
"""
ascending = 1
descending = 2
class BulkOpsRecord(object):
"""
For handling nesting of bulk operations
"""
def __init__(self):
self._active_count = 0
self.has_publish_item = False
self.has_library_updated_item = False
@property
def active(self):
"""
Return whether this bulk write is active.
"""
return self._active_count > 0
def nest(self):
"""
Record another level of nesting of this bulk write operation
"""
self._active_count += 1
def unnest(self):
"""
Record the completion of a level of nesting of the bulk write operation
"""
self._active_count -= 1
@property
def is_root(self):
"""
Return whether the bulk write is at the root (first) level of nesting
"""
return self._active_count == 1
class ActiveBulkThread(threading.local):
"""
Add the expected vars to the thread.
"""
def __init__(self, bulk_ops_record_type, **kwargs):
super(ActiveBulkThread, self).__init__(**kwargs)
self.records = defaultdict(bulk_ops_record_type)
class BulkOperationsMixin(object):
"""
This implements the :meth:`bulk_operations` modulestore semantics which handles nested invocations
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
def __init__(self, *args, **kwargs):
super(BulkOperationsMixin, self).__init__(*args, **kwargs)
self._active_bulk_ops = ActiveBulkThread(self._bulk_ops_record_type)
self.signal_handler = None
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree
until the bulk operation is completed.
"""
try:
self._begin_bulk_operation(course_id)
yield
finally:
self._end_bulk_operation(course_id, emit_signals)
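    # Usage sketch (illustrative; `store`, `course_key`, `block` and `user_id` are
    # assumed to exist): bulk operations nest, and persistence/signals happen only
    # when the outermost context manager exits.
    #
    #   with store.bulk_operations(course_key):
    #       with store.bulk_operations(course_key):   # nested: just bumps the counter
    #           store.update_item(block, user_id)
    #       # still buffered here
    #   # outermost exit: changes written, course_published signal emitted if needed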
# the relevant type of bulk_ops_record for the mixin (overriding classes should override
# this variable)
_bulk_ops_record_type = BulkOpsRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.BulkOpsRecord` for this course.
"""
if course_key is None:
return self._bulk_ops_record_type()
# Retrieve the bulk record based on matching org/course/run (possibly ignoring case)
if ignore_case:
for key, record in self._active_bulk_ops.records.iteritems():
# Shortcut: check basic equivalence for cases where org/course/run might be None.
if key == course_key or (
key.org.lower() == course_key.org.lower() and
key.course.lower() == course_key.course.lower() and
key.run.lower() == course_key.run.lower()
):
return record
return self._active_bulk_ops.records[course_key.for_branch(None)]
@property
def _active_records(self):
"""
Yield all active (CourseLocator, BulkOpsRecord) tuples.
"""
for course_key, record in self._active_bulk_ops.records.iteritems():
if record.active:
yield (course_key, record)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if course_key.for_branch(None) in self._active_bulk_ops.records:
del self._active_bulk_ops.records[course_key.for_branch(None)]
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key):
"""
The outermost nested bulk_operation call: do the actual begin of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _begin_bulk_operation(self, course_key):
"""
Begin a bulk operation on course_key.
"""
bulk_ops_record = self._get_bulk_ops_record(course_key)
# Increment the number of active bulk operations (bulk operations
# on the same course can be nested)
bulk_ops_record.nest()
# If this is the highest level bulk operation, then initialize it
if bulk_ops_record.is_root:
self._start_outermost_bulk_operation(bulk_ops_record, course_key)
def _end_outermost_bulk_operation(self, bulk_ops_record, structure_key):
"""
The outermost nested bulk_operation call: do the actual end of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _end_bulk_operation(self, structure_key, emit_signals=True):
"""
End the active bulk operation on structure_key (course or library key).
"""
# If no bulk op is active, return
bulk_ops_record = self._get_bulk_ops_record(structure_key)
if not bulk_ops_record.active:
return
# Send the pre-publish signal within the context of the bulk operation.
# Writes performed by signal handlers will be persisted when the bulk
# operation ends.
if emit_signals and bulk_ops_record.is_root:
self.send_pre_publish_signal(bulk_ops_record, structure_key)
bulk_ops_record.unnest()
# If this wasn't the outermost context, then don't close out the
# bulk operation.
if bulk_ops_record.active:
return
dirty = self._end_outermost_bulk_operation(bulk_ops_record, structure_key)
# The bulk op has ended. However, the signal tasks below still need to use the
# built-up bulk op information (if the signals trigger tasks in the same thread).
# So re-nest until the signals are sent.
bulk_ops_record.nest()
if emit_signals and dirty:
self.send_bulk_published_signal(bulk_ops_record, structure_key)
self.send_bulk_library_updated_signal(bulk_ops_record, structure_key)
# Signals are sent. Now unnest and clear the bulk op for good.
bulk_ops_record.unnest()
self._clear_bulk_ops_record(structure_key)
def _is_in_bulk_operation(self, course_key, ignore_case=False):
"""
Return whether a bulk operation is active on `course_key`.
"""
return self._get_bulk_ops_record(course_key, ignore_case).active
def send_pre_publish_signal(self, bulk_ops_record, course_id):
"""
Send a signal just before items are published in the course.
"""
signal_handler = getattr(self, "signal_handler", None)
if signal_handler and bulk_ops_record.has_publish_item:
signal_handler.send("pre_publish", course_key=course_id)
def send_bulk_published_signal(self, bulk_ops_record, course_id):
"""
Sends out the signal that items have been published from within this course.
"""
if self.signal_handler and bulk_ops_record.has_publish_item:
self.signal_handler.send("course_published", course_key=course_id)
bulk_ops_record.has_publish_item = False
def send_bulk_library_updated_signal(self, bulk_ops_record, library_id):
"""
Sends out the signal that library have been updated.
"""
if self.signal_handler and bulk_ops_record.has_library_updated_item:
self.signal_handler.send("library_updated", library_key=library_id)
bulk_ops_record.has_library_updated_item = False
class EditInfo(object):
"""
Encapsulates the editing info of a block.
"""
def __init__(self, **kwargs):
self.from_storable(kwargs)
# For details, see caching_descriptor_system.py get_subtree_edited_by/on.
self._subtree_edited_on = kwargs.get('_subtree_edited_on', None)
self._subtree_edited_by = kwargs.get('_subtree_edited_by', None)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'previous_version': self.previous_version,
'update_version': self.update_version,
'source_version': self.source_version,
'edited_on': self.edited_on,
'edited_by': self.edited_by,
'original_usage': self.original_usage,
'original_usage_version': self.original_usage_version,
}
def from_storable(self, edit_info):
"""
De-serialize from Mongo-storable format to an object.
"""
# Guid for the structure which previously changed this XBlock.
# (Will be the previous value of 'update_version'.)
self.previous_version = edit_info.get('previous_version', None)
# Guid for the structure where this XBlock got its current field values.
# May point to a structure not in this structure's history (e.g., to a draft
# branch from which this version was published).
self.update_version = edit_info.get('update_version', None)
self.source_version = edit_info.get('source_version', None)
# Datetime when this XBlock's fields last changed.
self.edited_on = edit_info.get('edited_on', None)
# User ID which changed this XBlock last.
self.edited_by = edit_info.get('edited_by', None)
# If this block has been copied from a library using copy_from_template,
# these fields point to the original block in the library, for analytics.
self.original_usage = edit_info.get('original_usage', None)
self.original_usage_version = edit_info.get('original_usage_version', None)
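    # Round-trip sketch (illustrative values): to_storable()/from_storable() are
    # symmetric for the persisted fields, so the following holds.
    #
    #   info = EditInfo(edited_by=42, edited_on=datetime.datetime.now(UTC))
    #   assert EditInfo(**info.to_storable()) == info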
def __repr__(self):
# pylint: disable=bad-continuation, redundant-keyword-arg
return ("{classname}(previous_version={self.previous_version}, "
"update_version={self.update_version}, "
"source_version={source_version}, "
"edited_on={self.edited_on}, "
"edited_by={self.edited_by}, "
"original_usage={self.original_usage}, "
"original_usage_version={self.original_usage_version}, "
"_subtree_edited_on={self._subtree_edited_on}, "
"_subtree_edited_by={self._subtree_edited_by})").format(
self=self,
classname=self.__class__.__name__,
source_version="UNSET" if self.source_version is None else self.source_version,
) # pylint: disable=bad-continuation
def __eq__(self, edit_info):
"""
Two EditInfo instances are equal iff their storable representations
are equal.
"""
return self.to_storable() == edit_info.to_storable()
def __neq__(self, edit_info):
"""
Two EditInfo instances are not equal if they're not equal.
"""
return not self == edit_info
class BlockData(object):
"""
Wrap the block data in an object instead of using a straight Python dictionary.
Allows the storing of meta-information about a structure that doesn't persist along with
the structure itself.
"""
def __init__(self, **kwargs):
# Has the definition been loaded?
self.definition_loaded = False
self.from_storable(kwargs)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'fields': self.fields,
'block_type': self.block_type,
'definition': self.definition,
'defaults': self.defaults,
'edit_info': self.edit_info.to_storable()
}
def from_storable(self, block_data):
"""
De-serialize from Mongo-storable format to an object.
"""
# Contains the Scope.settings and 'children' field values.
# 'children' are stored as a list of (block_type, block_id) pairs.
self.fields = block_data.get('fields', {})
# XBlock type ID.
self.block_type = block_data.get('block_type', None)
# DB id of the record containing the content of this XBlock.
self.definition = block_data.get('definition', None)
# Scope.settings default values copied from a template block (used e.g. when
# blocks are copied from a library to a course)
self.defaults = block_data.get('defaults', {})
# EditInfo object containing all versioning/editing data.
self.edit_info = EditInfo(**block_data.get('edit_info', {}))
def __repr__(self):
# pylint: disable=bad-continuation, redundant-keyword-arg
return ("{classname}(fields={self.fields}, "
"block_type={self.block_type}, "
"definition={self.definition}, "
"definition_loaded={self.definition_loaded}, "
"defaults={self.defaults}, "
"edit_info={self.edit_info})").format(
self=self,
classname=self.__class__.__name__,
) # pylint: disable=bad-continuation
def __eq__(self, block_data):
"""
Two BlockData objects are equal iff all their attributes are equal.
"""
attrs = ['fields', 'block_type', 'definition', 'defaults', 'edit_info']
return all(getattr(self, attr) == getattr(block_data, attr) for attr in attrs)
def __neq__(self, block_data):
"""
Just define this as not self.__eq__(block_data)
"""
return not self == block_data
new_contract('BlockData', BlockData)
class IncorrectlySortedList(Exception):
"""
Thrown when calling find() on a SortedAssetList not sorted by filename.
"""
pass
class SortedAssetList(SortedListWithKey):
"""
List of assets that is sorted based on an asset attribute.
"""
def __init__(self, **kwargs):
self.filename_sort = False
key_func = kwargs.get('key', None)
if key_func is None:
kwargs['key'] = itemgetter('filename')
self.filename_sort = True
super(SortedAssetList, self).__init__(**kwargs)
@contract(asset_id=AssetKey)
def find(self, asset_id):
"""
Find the index of a particular asset in the list. This method is only functional for lists
        sorted by filename. If the list is sorted on any other key, find() raises an
        IncorrectlySortedList exception.
Returns: Index of asset, if found. None if not found.
"""
# Don't attempt to find an asset by filename in a list that's not sorted by filename.
if not self.filename_sort:
raise IncorrectlySortedList()
# See if this asset already exists by checking the external_filename.
# Studio doesn't currently support using multiple course assets with the same filename.
# So use the filename as the unique identifier.
idx = None
idx_left = self.bisect_left({'filename': asset_id.path})
idx_right = self.bisect_right({'filename': asset_id.path})
if idx_left != idx_right:
# Asset was found in the list.
idx = idx_left
return idx
@contract(asset_md=AssetMetadata)
def insert_or_update(self, asset_md):
"""
Insert asset metadata if asset is not present. Update asset metadata if asset is already present.
"""
metadata_to_insert = asset_md.to_storable()
asset_idx = self.find(asset_md.asset_id)
if asset_idx is None:
# Add new metadata sorted into the list.
self.add(metadata_to_insert)
else:
# Replace existing metadata.
self[asset_idx] = metadata_to_insert
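    # Usage sketch (illustrative; `course_key` is an assumed CourseKey): the default
    # key function sorts by filename, which is the ordering find() relies on.
    #
    #   assets = SortedAssetList(iterable=[])
    #   md = AssetMetadata(course_key.make_asset_key('asset', 'logo.png'), 'logo.png')
    #   assets.insert_or_update(md)          # inserted, since not present yet
    #   idx = assets.find(md.asset_id)       # index of the stored metadata dict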
class ModuleStoreAssetBase(object):
"""
The methods for accessing assets and their metadata
"""
def _find_course_asset(self, asset_key):
"""
Returns same as _find_course_assets plus the index to the given asset or None. Does not convert
to AssetMetadata; thus, is internal.
Arguments:
asset_key (AssetKey): what to look for
Returns:
Tuple of:
- AssetMetadata[] for all assets of the given asset_key's type
- the index of asset in list (None if asset does not exist)
"""
course_assets = self._find_course_assets(asset_key.course_key)
all_assets = SortedAssetList(iterable=[])
# Assets should be pre-sorted, so add them efficiently without sorting.
# extend() will raise a ValueError if the passed-in list is not sorted.
all_assets.extend(course_assets.setdefault(asset_key.block_type, []))
idx = all_assets.find(asset_key)
return course_assets, idx
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return None
mdata = AssetMetadata(asset_key, asset_key.path, **kwargs)
all_assets = course_assets[asset_key.asset_type]
mdata.from_storable(all_assets[asset_idx])
return mdata
@contract(
course_key='CourseKey', asset_type='None | basestring',
start='int | None', maxresults='int | None', sort='tuple(str,(int,>=1,<=2))|None'
)
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of asset metadata for all assets of the given asset_type in the course.
Args:
course_key (CourseKey): course identifier
asset_type (str): the block_type of the assets to return. If None, return assets of all types.
start (int): optional - start at this asset number. Zero-based!
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of SortOrder.ascending or SortOrder.descending
Returns:
List of AssetMetadata objects.
"""
course_assets = self._find_course_assets(course_key)
# Determine the proper sort - with defaults of ('displayname', SortOrder.ascending).
key_func = None
sort_order = ModuleStoreEnum.SortOrder.ascending
if sort:
if sort[0] == 'uploadDate':
key_func = lambda x: x['edit_info']['edited_on']
if sort[1] == ModuleStoreEnum.SortOrder.descending:
sort_order = ModuleStoreEnum.SortOrder.descending
if asset_type is None:
# Add assets of all types to the sorted list.
all_assets = SortedAssetList(iterable=[], key=key_func)
for asset_type, val in course_assets.iteritems():
all_assets.update(val)
else:
# Add assets of a single type to the sorted list.
all_assets = SortedAssetList(iterable=course_assets.get(asset_type, []), key=key_func)
num_assets = len(all_assets)
start_idx = start
end_idx = min(num_assets, start + maxresults)
if maxresults < 0:
# No limit on the results.
end_idx = num_assets
step_incr = 1
if sort_order == ModuleStoreEnum.SortOrder.descending:
# Flip the indices and iterate backwards.
step_incr = -1
start_idx = (num_assets - 1) - start_idx
end_idx = (num_assets - 1) - end_idx
ret_assets = []
for idx in xrange(start_idx, end_idx, step_incr):
raw_asset = all_assets[idx]
asset_key = course_key.make_asset_key(raw_asset['asset_type'], raw_asset['filename'])
new_asset = AssetMetadata(asset_key)
new_asset.from_storable(raw_asset)
ret_assets.append(new_asset)
return ret_assets
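    # Usage sketch (illustrative; `store` and `course_key` are assumed): fetch the
    # first 20 'asset' entries, newest first.
    #
    #   assets = store.get_all_asset_metadata(
    #       course_key, 'asset', start=0, maxresults=20,
    #       sort=('uploadDate', ModuleStoreEnum.SortOrder.descending),
    #   )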
# pylint: disable=unused-argument
def check_supports(self, course_key, method):
"""
Verifies that a modulestore supports a particular method.
Some modulestores may differ based on the course_key, such
as mixed (since it has to find the underlying modulestore),
so it's required as part of the method signature.
"""
return hasattr(self, method)
class ModuleStoreAssetWriteInterface(ModuleStoreAssetBase):
"""
The write operations for assets and asset metadata
"""
def _save_assets_by_type(self, course_key, asset_metadata_list, course_assets, user_id, import_only):
"""
Common private method that saves/updates asset metadata items in the internal modulestore
structure used to store asset metadata items.
"""
# Lazily create a sorted list if not already created.
assets_by_type = defaultdict(lambda: SortedAssetList(iterable=course_assets.get(asset_type, [])))
for asset_md in asset_metadata_list:
if asset_md.asset_id.course_key != course_key:
# pylint: disable=logging-format-interpolation
log.warning("Asset's course {} does not match other assets for course {} - not saved.".format(
asset_md.asset_id.course_key, course_key
))
continue
if not import_only:
asset_md.update({'edited_by': user_id, 'edited_on': datetime.datetime.now(UTC)})
asset_type = asset_md.asset_id.asset_type
all_assets = assets_by_type[asset_type]
all_assets.insert_or_update(asset_md)
return assets_by_type
@contract(asset_metadata='AssetMetadata')
def save_asset_metadata(self, asset_metadata, user_id, import_only):
"""
Saves the asset metadata for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if metadata save was successful, else False
"""
raise NotImplementedError()
@contract(asset_metadata_list='list(AssetMetadata)')
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only):
"""
Saves a list of asset metadata for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if metadata save was successful, else False
"""
raise NotImplementedError()
def set_asset_metadata_attrs(self, asset_key, attrs, user_id):
"""
Base method to over-ride in modulestore.
"""
raise NotImplementedError()
def delete_asset_metadata(self, asset_key, user_id):
"""
Base method to over-ride in modulestore.
"""
raise NotImplementedError()
@contract(asset_key='AssetKey', attr=str)
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
user_id (int): user ID saving the asset metadata
Raises:
ItemNotFoundError if no such item exists
AttributeError is attr is one of the build in attrs.
"""
return self.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
NOTE: unlike get_all_asset_metadata, this does not take an asset type because
this function is intended for things like cloning or exporting courses not for
clients to list assets.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int): user ID copying the asset metadata
"""
pass
# pylint: disable=abstract-method
class ModuleStoreRead(ModuleStoreAssetBase):
"""
An abstract interface for a database backend that stores XModuleDescriptor
instances and extends read-only functionality
"""
__metaclass__ = ABCMeta
@abstractmethod
def has_item(self, usage_key):
"""
Returns True if usage_key exists in this ModuleStore.
"""
pass
@abstractmethod
def get_item(self, usage_key, depth=0, using_descriptor_system=None, **kwargs):
"""
Returns an XModuleDescriptor instance for the item at location.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
usage_key: A :class:`.UsageKey` subclass instance
depth (int): An argument that some module stores may use to prefetch
            descendants of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
            get_children() to cache. None indicates to cache all descendants
"""
pass
@abstractmethod
def get_course_errors(self, course_key):
"""
Return a list of (msg, exception-or-None) errors that the modulestore
encountered when loading the course at course_id.
Raises the same exceptions as get_item if the location isn't found or
isn't fully specified.
Args:
course_key (:class:`.CourseKey`): The course to check for errors
"""
pass
@abstractmethod
def get_items(self, course_id, qualifiers=None, **kwargs):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
"""
pass
@contract(block='XBlock | BlockData | dict', qualifiers=dict)
def _block_matches(self, block, qualifiers):
"""
Return True or False depending on whether the field value (block contents)
matches the qualifiers as per get_items.
NOTE: Method only finds directly set value matches - not inherited nor default value matches.
For substring matching:
pass a regex object.
For arbitrary function comparison such as date time comparison:
pass the function as in start=lambda x: x < datetime.datetime(2014, 1, 1, 0, tzinfo=pytz.UTC)
Args:
block (dict, XBlock, or BlockData): either the BlockData (transformed from the db) -or-
a dict (from BlockData.fields or get_explicitly_set_fields_by_scope) -or-
the xblock.fields() value -or-
the XBlock from which to get the 'fields' value.
qualifiers (dict): {field: value} search pairs.
"""
if isinstance(block, XBlock):
# If an XBlock is passed-in, just match its fields.
xblock, fields = (block, block.fields)
elif isinstance(block, BlockData):
# BlockData is an object - compare its attributes in dict form.
xblock, fields = (None, block.__dict__)
else:
xblock, fields = (None, block)
def _is_set_on(key):
"""
Is this key set in fields? (return tuple of boolean and value). A helper which can
handle fields either being the json doc or xblock fields. Is inner function to restrict
use and to access local vars.
"""
if key not in fields:
return False, None
field = fields[key]
if xblock is not None:
return field.is_set_on(block), getattr(xblock, key)
else:
return True, field
for key, criteria in qualifiers.iteritems():
is_set, value = _is_set_on(key)
if isinstance(criteria, dict) and '$exists' in criteria and criteria['$exists'] == is_set:
continue
if not is_set:
return False
if not self._value_matches(value, criteria):
return False
return True
def _value_matches(self, target, criteria):
"""
helper for _block_matches: does the target (field value) match the criteria?
If target is a list, do any of the list elements meet the criteria
If the criteria is a regex, does the target match it?
If the criteria is a function, does invoking it on the target yield something truthy?
If criteria is a dict {($nin|$in): []}, then do (none|any) of the list elements meet the criteria
Otherwise, is the target == criteria
"""
if isinstance(target, list):
return any(self._value_matches(ele, criteria) for ele in target)
elif isinstance(criteria, re._pattern_type): # pylint: disable=protected-access
return criteria.search(target) is not None
elif callable(criteria):
return criteria(target)
elif isinstance(criteria, dict) and '$in' in criteria:
# note isn't handling any other things in the dict other than in
return any(self._value_matches(target, test_val) for test_val in criteria['$in'])
elif isinstance(criteria, dict) and '$nin' in criteria:
# note isn't handling any other things in the dict other than nin
return not any(self._value_matches(target, test_val) for test_val in criteria['$nin'])
else:
return criteria == target
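    # Qualifier sketch (illustrative field names): get_items() style qualifiers can mix
    # exact values, compiled regexes, callables and $in/$nin dicts, e.g.
    #
    #   qualifiers = {
    #       'block_type': 'problem',                               # exact match
    #       'display_name': re.compile(r'Exam'),                   # regex search
    #       'due': lambda d: d < datetime.datetime(2015, 1, 1, tzinfo=UTC),  # callable
    #       'format': {'$in': ['Homework', 'Lab']},                # any-of
    #   }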
@abstractmethod
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
pass
@abstractmethod
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses
in this modulestore. This method can take an optional argument 'org' which
will efficiently apply a filter so that only the courses of the specified
ORG in the CourseKey will be fetched.
'''
pass
@abstractmethod
def get_course(self, course_id, depth=0, **kwargs):
'''
Look for a specific course by its id (:class:`CourseKey`).
Returns the course descriptor, or None if not found.
'''
pass
@abstractmethod
def has_course(self, course_id, ignore_case=False, **kwargs):
'''
Look for a specific course id. Returns whether it exists.
Args:
course_id (CourseKey):
ignore_case (boolean): some modulestores are case-insensitive. Use this flag
to search for whether a potentially conflicting course exists in that case.
'''
pass
@abstractmethod
def get_parent_location(self, location, **kwargs):
'''
Find the location that is the parent of this location in this
course. Needed for path_to_location().
'''
pass
@abstractmethod
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
pass
@abstractmethod
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
pass
@abstractmethod
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given
course_id. The return can be either "xml" (for XML based courses) or "mongo" for MongoDB backed courses
"""
pass
@abstractmethod
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
pass
@abstractmethod
def has_published_version(self, xblock):
"""
Returns true if this xblock exists in the published course regardless of whether it's up to date
"""
pass
@abstractmethod
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
pass
@contextmanager
def bulk_operations(self, course_id, emit_signals=True): # pylint: disable=unused-argument
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
"""
yield
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
pass
# pylint: disable=abstract-method
class ModuleStoreWrite(ModuleStoreRead, ModuleStoreAssetWriteInterface):
"""
An abstract interface for a database backend that stores XModuleDescriptor
instances and extends both read and write functionality
"""
__metaclass__ = ABCMeta
@abstractmethod
def update_item(self, xblock, user_id, allow_not_found=False, force=False, **kwargs):
"""
Update the given xblock's persisted repr. Pass the user's unique id which the persistent store
should save with the update if it has that ability.
:param allow_not_found: whether this method should raise an exception if the given xblock
has not been persisted before.
:param force: fork the structure and don't update the course draftVersion if there's a version
conflict (only applicable to version tracking and conflict detecting persistence stores)
:raises VersionConflictError: if org, course, run, and version_guid given and the current
version head != version_guid and force is not True. (only applicable to version tracking stores)
"""
pass
@abstractmethod
def delete_item(self, location, user_id, **kwargs):
"""
Delete an item and its subtree from persistence. Remove the item from any parents (Note, does not
affect parents from other branches or logical branches; thus, in old mongo, deleting something
whose parent cannot be draft, deletes it from both but deleting a component under a draft vertical
only deletes it from the draft.
Pass the user's unique id which the persistent store
should save with the update if it has that ability.
:param force: fork the structure and don't update the course draftVersion if there's a version
conflict (only applicable to version tracking and conflict detecting persistence stores)
:raises VersionConflictError: if org, course, run, and version_guid given and the current
version head != version_guid and force is not True. (only applicable to version tracking stores)
"""
pass
@abstractmethod
def create_course(self, org, course, run, user_id, fields=None, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
"""
pass
@abstractmethod
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
pass
@abstractmethod
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None):
"""
        Sets up dest_course_id to point to a course with the same content as source_course_id. This
        operation may be cheap or expensive. It may have to copy all assets and all xblock content or
        merely set up new pointers.
Backward compatibility: this method used to require in some modulestores that dest_course_id
pointed to an empty but already created course. Implementers should support this or should
enable creating the course from scratch.
Raises:
ItemNotFoundError: if the source course doesn't exist (or any of its xblocks aren't found)
DuplicateItemError: if the destination course already exists (with content in some cases)
"""
pass
@abstractmethod
def delete_course(self, course_key, user_id, **kwargs):
"""
Deletes the course. It may be a soft or hard delete. It may or may not remove the xblock definitions
depending on the persistence layer and how tightly bound the xblocks are to the course.
Args:
course_key (CourseKey): which course to delete
user_id: id of the user deleting the course
"""
pass
@abstractmethod
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
pass
# pylint: disable=abstract-method
class ModuleStoreReadBase(BulkOperationsMixin, ModuleStoreRead):
'''
Implement interface functionality that can be shared.
'''
# pylint: disable=invalid-name
def __init__(
self,
contentstore=None,
doc_store_config=None, # ignore if passed up
metadata_inheritance_cache_subsystem=None, request_cache=None,
xblock_mixins=(), xblock_select=None, disabled_xblock_types=(), # pylint: disable=bad-continuation
# temporary parms to enable backward compatibility. remove once all envs migrated
db=None, collection=None, host=None, port=None, tz_aware=True, user=None, password=None,
# allow lower level init args to pass harmlessly
** kwargs
):
'''
Set up the error-tracking logic.
'''
super(ModuleStoreReadBase, self).__init__(**kwargs)
self._course_errors = defaultdict(make_error_tracker) # location -> ErrorLog
# pylint: disable=fixme
# TODO move the inheritance_cache_subsystem to classes which use it
self.metadata_inheritance_cache_subsystem = metadata_inheritance_cache_subsystem
self.request_cache = request_cache
self.xblock_mixins = xblock_mixins
self.xblock_select = xblock_select
self.disabled_xblock_types = disabled_xblock_types
self.contentstore = contentstore
def get_course_errors(self, course_key):
"""
Return list of errors for this :class:`.CourseKey`, if any. Raise the same
errors as get_item if course_key isn't present.
"""
# check that item is present and raise the promised exceptions if needed
# pylint: disable=fixme
# TODO (vshnayder): post-launch, make errors properties of items
# self.get_item(location)
assert isinstance(course_key, CourseKey)
return self._course_errors[course_key].errors
def get_errored_courses(self):
"""
Returns an empty dict.
It is up to subclasses to extend this method if the concept
of errored courses makes sense for their implementation.
"""
return {}
def get_course(self, course_id, depth=0, **kwargs):
"""
See ModuleStoreRead.get_course
Default impl--linear search through course list
"""
assert isinstance(course_id, CourseKey)
for course in self.get_courses(**kwargs):
if course.id == course_id:
return course
return None
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
Returns the course_id of the course if it was found, else None
Args:
course_id (CourseKey):
ignore_case (boolean): some modulestores are case-insensitive. Use this flag
to search for whether a potentially conflicting course exists in that case.
"""
# linear search through list
assert isinstance(course_id, CourseKey)
if ignore_case:
return next(
(
c.id for c in self.get_courses()
if c.id.org.lower() == course_id.org.lower() and
c.id.course.lower() == course_id.course.lower() and
c.id.run.lower() == course_id.run.lower()
),
None
)
else:
return next(
(c.id for c in self.get_courses() if c.id == course_id),
None
)
def has_published_version(self, xblock):
"""
Returns True since this is a read-only store.
"""
return True
def heartbeat(self):
"""
Is this modulestore ready?
"""
# default is to say yes by not raising an exception
return {'default_impl': True}
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
if self.contentstore:
self.contentstore.close_connections()
super(ModuleStoreReadBase, self).close_connections()
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store
"""
if self.get_modulestore_type(None) != store_type:
raise ValueError(u"Cannot set default store to type {}".format(store_type))
yield
# pylint: disable=abstract-method
class ModuleStoreWriteBase(ModuleStoreReadBase, ModuleStoreWrite):
'''
Implement interface functionality that can be shared.
'''
def __init__(self, contentstore, **kwargs):
super(ModuleStoreWriteBase, self).__init__(contentstore=contentstore, **kwargs)
self.mixologist = Mixologist(self.xblock_mixins)
def partition_fields_by_scope(self, category, fields):
"""
Return dictionary of {scope: {field1: val, ..}..} for the fields of this potential xblock
:param category: the xblock category
:param fields: the dictionary of {fieldname: value}
"""
result = collections.defaultdict(dict)
if fields is None:
return result
cls = self.mixologist.mix(XBlock.load_class(category, select=prefer_xmodules))
for field_name, value in fields.iteritems():
field = getattr(cls, field_name)
result[field.scope][field_name] = value
return result
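    # Sketch (illustrative field names): a flat {name: value} dict is split per scope
    # according to the XBlock class registered for the category, e.g. for 'html':
    #
    #   fields = {'display_name': 'Week 1', 'data': '<p>hi</p>'}
    #   by_scope = store.partition_fields_by_scope('html', fields)
    #   # -> roughly {Scope.settings: {'display_name': ...}, Scope.content: {'data': ...}}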
def create_course(self, org, course, run, user_id, fields=None, runtime=None, **kwargs):
"""
        Creates any other necessary artifacts for the course as a side effect and doesn't return
anything useful. The real subclass should call this before it returns the course.
"""
# clone a default 'about' overview module as well
about_location = self.make_course_key(org, course, run).make_usage_key('about', 'overview')
about_descriptor = XBlock.load_class('about')
overview_template = about_descriptor.get_template('overview.yaml')
self.create_item(
user_id,
about_location.course_key,
about_location.block_type,
block_id=about_location.block_id,
definition_data={'data': overview_template.get('data')},
metadata=overview_template.get('metadata'),
runtime=runtime,
continue_version=True,
)
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
This base method just copies the assets. The lower level impls must do the actual cloning of
content.
"""
with self.bulk_operations(dest_course_id):
# copy the assets
if self.contentstore:
self.contentstore.copy_all_course_assets(source_course_id, dest_course_id)
return dest_course_id
def delete_course(self, course_key, user_id, **kwargs):
"""
This base method just deletes the assets. The lower level impls must do the actual deleting of
content.
"""
# delete the assets
if self.contentstore:
self.contentstore.delete_all_course_assets(course_key)
super(ModuleStoreWriteBase, self).delete_course(course_key, user_id)
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
if self.contentstore:
self.contentstore._drop_database() # pylint: disable=protected-access
super(ModuleStoreWriteBase, self)._drop_database() # pylint: disable=protected-access
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
        Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
item = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, **kwargs)
parent = self.get_item(parent_usage_key)
parent.children.append(item.location)
        self.update_item(parent, user_id)
        return item
def _flag_publish_event(self, course_key):
"""
Wrapper around calls to fire the course_published signal
        Unless we're nested in an active bulk operation, this simply fires the signal;
        otherwise the publish is signalled at the end of the bulk operation.
Arguments:
course_key - course_key to which the signal applies
"""
if self.signal_handler:
bulk_record = self._get_bulk_ops_record(course_key) if isinstance(self, BulkOperationsMixin) else None
if bulk_record and bulk_record.active:
bulk_record.has_publish_item = True
else:
self.signal_handler.send("course_published", course_key=course_key)
def _flag_library_updated_event(self, library_key):
"""
Wrapper around calls to fire the library_updated signal
        Unless we're nested in an active bulk operation, this simply fires the signal;
        otherwise the update is signalled at the end of the bulk operation.
Arguments:
library_key - library_key to which the signal applies
"""
if self.signal_handler:
bulk_record = self._get_bulk_ops_record(library_key) if isinstance(self, BulkOperationsMixin) else None
if bulk_record and bulk_record.active:
bulk_record.has_library_updated_item = True
else:
self.signal_handler.send("library_updated", library_key=library_key)
def _emit_course_deleted_signal(self, course_key):
"""
Helper method used to emit the course_deleted signal.
"""
if self.signal_handler:
self.signal_handler.send("course_deleted", course_key=course_key)
def only_xmodules(identifier, entry_points):
"""Only use entry_points that are supplied by the xmodule package"""
from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule']
return default_select(identifier, from_xmodule)
def prefer_xmodules(identifier, entry_points):
"""Prefer entry_points from the xmodule package"""
from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule']
if from_xmodule:
return default_select(identifier, from_xmodule)
else:
return default_select(identifier, entry_points)
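# Illustrative note (editor addition, not part of the original code): given
# entry points whose dist.key values are ('xmodule', 'some_plugin'),
# only_xmodules narrows the candidates to the 'xmodule'-provided entry point
# before delegating to default_select, while prefer_xmodules does the same but
# falls back to the full, unfiltered list when no 'xmodule' entry point exists.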
class EdxJSONEncoder(json.JSONEncoder):
"""
Custom JSONEncoder that handles `Location` and `datetime.datetime` objects.
`Location`s are encoded as their url string form, and `datetime`s as
ISO date strings
"""
def default(self, obj):
if isinstance(obj, (CourseKey, UsageKey)):
return unicode(obj)
elif isinstance(obj, datetime.datetime):
if obj.tzinfo is not None:
if obj.utcoffset() is None:
return obj.isoformat() + 'Z'
else:
return obj.isoformat()
else:
return obj.isoformat()
else:
return super(EdxJSONEncoder, self).default(obj)
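if __name__ == '__main__':
    # Minimal usage sketch (editor addition, not part of the original module).
    # EdxJSONEncoder lets json.dumps() serialize datetimes (and opaque keys)
    # that the stock encoder rejects; a naive datetime comes out as a plain
    # ISO string, while a tz-aware one keeps its offset.
    example = {'start': datetime.datetime(2014, 1, 1, 12, 30)}
    print(json.dumps(example, cls=EdxJSONEncoder))  # {"start": "2014-01-01T12:30:00"}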
| agpl-3.0 | -5,972,559,246,773,214,000 | 37.791989 | 125 | 0.626547 | false |
nisse3000/pymatgen | pymatgen/analysis/tests/test_structure_matcher.py | 5 | 31631 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import json
import numpy as np
from pymatgen.analysis.structure_matcher import StructureMatcher, \
ElementComparator, FrameworkComparator, OrderDisorderElementComparator, \
OccupancyComparator
from monty.json import MontyDecoder
from pymatgen.core.operations import SymmOp
from pymatgen import Structure, Element, Lattice
from pymatgen.util.coord import find_in_coord_list_pbc
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class StructureMatcherTest(PymatgenTest):
def setUp(self):
with open(os.path.join(test_dir, "TiO2_entries.json"), 'r') as fp:
entries = json.load(fp, cls=MontyDecoder)
self.struct_list = [e.structure for e in entries]
self.oxi_structs = [self.get_structure("Li2O"),
Structure.from_file(os.path.join(
test_dir, "POSCAR.Li2O"))]
def test_ignore_species(self):
s1 = Structure.from_file(os.path.join(test_dir, "LiFePO4.cif"))
s2 = Structure.from_file(os.path.join(test_dir, "POSCAR"))
m = StructureMatcher(ignored_species=["Li"], primitive_cell=False,
attempt_supercell=True)
self.assertTrue(m.fit(s1, s2))
self.assertTrue(m.fit_anonymous(s1, s2))
groups = m.group_structures([s1, s2])
self.assertEqual(len(groups), 1)
s2.make_supercell((2, 1, 1))
ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)
self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)
self.assertEqual(ss1.composition.reduced_formula, "LiFePO4")
self.assertEqual({
k.symbol: v.symbol for k, v in
m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},
{"Fe": "Fe", "P": "P", "O": "O"})
def test_get_supercell_size(self):
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.9)
s1 = Structure(l, ['Mg', 'Cu', 'Ag', 'Cu', 'Ag'], [[0]*3]*5)
s2 = Structure(l2, ['Cu', 'Cu', 'Ag'], [[0]*3]*3)
sm = StructureMatcher(supercell_size='volume')
self.assertEqual(sm._get_supercell_size(s1, s2),
(1, True))
self.assertEqual(sm._get_supercell_size(s2, s1),
(1, True))
sm = StructureMatcher(supercell_size='num_sites')
self.assertEqual(sm._get_supercell_size(s1, s2),
(2, False))
self.assertEqual(sm._get_supercell_size(s2, s1),
(2, True))
sm = StructureMatcher(supercell_size='Ag')
self.assertEqual(sm._get_supercell_size(s1, s2),
(2, False))
self.assertEqual(sm._get_supercell_size(s2, s1),
(2, True))
sm = StructureMatcher(supercell_size='wfieoh')
self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)
def test_cmp_fstruct(self):
sm = StructureMatcher()
s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
s2 = np.array([[0.11, 0.22, 0.33]])
frac_tol = np.array([0.02, 0.03, 0.04])
mask = np.array([[False, False]])
mask2 = np.array([[True, False]])
self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)
self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)
self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol/2, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))
def test_cart_dists(self):
sm = StructureMatcher()
l = Lattice.orthorhombic(1, 2, 3)
s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])
s2 = np.array([[0.11, 0.22, 0.33]])
s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])
s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])
mask = np.array([[False, False]])
mask2 = np.array([[False, True]])
mask3 = np.array([[False, False], [False, False]])
mask4 = np.array([[False, True], [False, True]])
n1 = (len(s1) / l.volume) ** (1/3)
n2 = (len(s2) / l.volume) ** (1/3)
self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)
self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)
d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))
self.assertTrue(np.allclose(s, [1]))
#check that masking best value works
d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))
self.assertTrue(np.allclose(s, [0]))
#check that averaging of translation is done properly
d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)
self.assertTrue(np.allclose(d, [0.08093341]*2))
self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))
self.assertTrue(np.allclose(s, [1, 0]))
#check distances are large when mask allows no 'real' mapping
d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)
self.assertTrue(np.min(d) > 1e8)
self.assertTrue(np.min(ft) > 1e8)
def test_get_mask(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
s1 = Structure(l, ['Mg', 'Cu', 'Ag', 'Cu'], [[0]*3]*4)
s2 = Structure(l, ['Cu', 'Cu', 'Ag'], [[0]*3]*3)
result = [[True, False, True, False],
[True, False, True, False],
[True, True, False, True]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertEqual(inds, [2])
#test supercell with match
result = [[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1]]
m, inds, i = sm._get_mask(s1, s2, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertTrue(np.allclose(inds, np.array([4])))
#test supercell without match
result = [[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1]]
m, inds, i = sm._get_mask(s2, s1, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
#test s2_supercell
result = [[1, 1, 1], [1, 1, 1],
[0, 0, 1], [0, 0, 1],
[1, 1, 0], [1, 1, 0],
[0, 0, 1], [0, 0, 1]]
m, inds, i = sm._get_mask(s2, s1, 2, False)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
#test for multiple translation indices
s1 = Structure(l, ['Cu', 'Ag', 'Cu', 'Ag', 'Ag'], [[0]*3]*5)
s2 = Structure(l, ['Ag', 'Cu', 'Ag'], [[0]*3]*3)
result = [[1, 0, 1, 0, 0],
[0, 1, 0, 1, 1],
[1, 0, 1, 0, 0]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 1)
self.assertTrue(np.allclose(inds, [0, 2]))
def test_get_supercells(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.5)
s1 = Structure(l, ['Mg', 'Cu', 'Ag', 'Cu'], [[0]*3]*4)
s2 = Structure(l2, ['Cu', 'Cu', 'Ag'], [[0]*3]*3)
scs = list(sm._get_supercells(s1, s2, 8, False))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 4)
self.assertEqual(len(x[1]), 24)
self.assertEqual(len(scs), 48)
scs = list(sm._get_supercells(s2, s1, 8, True))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 24)
self.assertEqual(len(x[1]), 4)
self.assertEqual(len(scs), 48)
def test_fit(self):
"""
Take two known matched structures
1) Ensure match
2) Ensure match after translation and rotations
3) Ensure no-match after large site translation
4) Ensure match after site shuffling
"""
sm = StructureMatcher()
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test rotational/translational invariance
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False,
np.array([0.4, 0.7, 0.9]))
self.struct_list[1].apply_operation(op)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
#Test failure under large atomic translation
self.struct_list[1].translate_sites([0], [.4, .4, .2],
frac_coords=True)
self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))
self.struct_list[1].translate_sites([0], [-.4, -.4, -.2],
frac_coords=True)
# random.shuffle(editor._sites)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
        # Test FrameworkComparator
sm2 = StructureMatcher(comparator=FrameworkComparator())
lfp = self.get_structure("LiFePO4")
nfp = self.get_structure("NaFePO4")
self.assertTrue(sm2.fit(lfp, nfp))
self.assertFalse(sm.fit(lfp, nfp))
#Test anonymous fit.
self.assertEqual(sm.fit_anonymous(lfp, nfp), True)
self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0],
0.060895871160262717)
#Test partial occupancies.
s1 = Structure(Lattice.cubic(3),
[{"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25],
[0.5, 0.5, 0.5], [0.75, 0.75, 0.75]])
s2 = Structure(Lattice.cubic(3),
[{"Fe": 0.25}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.75}],
[[0, 0, 0], [0.25, 0.25, 0.25],
[0.5, 0.5, 0.5], [0.75, 0.75, 0.75]])
self.assertFalse(sm.fit(s1, s2))
self.assertFalse(sm.fit(s2, s1))
s2 = Structure(Lattice.cubic(3),
[{"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5},
{"Mn": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25],
[0.5, 0.5, 0.5], [0.75, 0.75, 0.75]])
self.assertEqual(sm.fit_anonymous(s1, s2), True)
self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)
def test_oxi(self):
"""Test oxidation state removal matching"""
sm = StructureMatcher()
self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
sm = StructureMatcher(comparator=ElementComparator())
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
def test_primitive(self):
"""Test primitive cell reduction"""
sm = StructureMatcher(primitive_cell=True)
self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
def test_class(self):
# Tests entire class as single working unit
sm = StructureMatcher()
# Test group_structures and find_indices
out = sm.group_structures(self.struct_list)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
self.assertEqual(sum(map(len, out)), len(self.struct_list))
for s in self.struct_list[::2]:
s.replace_species({'Ti': 'Zr', 'O':'Ti'})
out = sm.group_structures(self.struct_list, anonymous=True)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
def test_mix(self):
structures = [self.get_structure("Li2O"),
self.get_structure("Li2O2"),
self.get_structure("LiFePO4")]
for fname in ["POSCAR.Li2O", "POSCAR.LiFePO4"]:
structures.append(Structure.from_file(os.path.join(test_dir, fname)))
sm = StructureMatcher(comparator=ElementComparator())
groups = sm.group_structures(structures)
for g in groups:
formula = g[0].composition.reduced_formula
if formula in ["Li2O", "LiFePO4"]:
self.assertEqual(len(g), 2)
else:
self.assertEqual(len(g), 1)
def test_left_handed_lattice(self):
"""Ensure Left handed lattices are accepted"""
sm = StructureMatcher()
s = Structure.from_file(os.path.join(test_dir, "Li3GaPCO7.json"))
self.assertTrue(sm.fit(s, s))
def test_as_dict_and_from_dict(self):
sm = StructureMatcher(ltol=0.1, stol=0.2, angle_tol=2,
primitive_cell=False, scale=False,
comparator=FrameworkComparator())
d = sm.as_dict()
sm2 = StructureMatcher.from_dict(d)
self.assertEqual(sm2.as_dict(), d)
def test_no_scaling(self):
sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2,
scale=False, comparator=ElementComparator())
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
self.assertTrue(sm.get_rms_dist(self.struct_list[0],
self.struct_list[1])[0] < 0.0008)
def test_supercell_fit(self):
sm = StructureMatcher(attempt_supercell=False)
s1 = Structure.from_file(os.path.join(test_dir, "Al3F9.json"))
s2 = Structure.from_file(os.path.join(test_dir, "Al3F9_distorted.json"))
self.assertFalse(sm.fit(s1, s2))
sm = StructureMatcher(attempt_supercell=True)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
def test_get_lattices(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=True, scale=True,
attempt_supercell=False)
l1 = Lattice.from_lengths_and_angles([1, 2.1, 1.9] , [90, 89, 91])
l2 = Lattice.from_lengths_and_angles([1.1, 2, 2] , [89, 91, 90])
s1 = Structure(l1, [], [])
s2 = Structure(l2, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))
self.assertEqual(len(lattices), 16)
l3 = Lattice.from_lengths_and_angles([1.1, 2, 20] , [89, 91, 90])
s3 = Structure(l3, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))
self.assertEqual(len(lattices), 0)
def test_find_match1(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=True, scale=True,
attempt_supercell=False)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0,0.1],[0,0,0.2],[.7,.4,.5]])
s2 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0.1,0],[0,0.1,-0.95],[.7,.5,.375]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell = True, use_rms = True, break_on_match = False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
fc = s2.frac_coords + match[3]
fc -= np.round(fc)
self.assertAlmostEqual(np.sum(fc), 0.9)
self.assertAlmostEqual(np.sum(fc[:,:2]), 0.1)
cart_dist = np.sum(match[1] * (l.volume/3) ** (1/3))
self.assertAlmostEqual(cart_dist, 0.15)
def test_find_match2(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=True, scale=True,
attempt_supercell=False)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ['Si', 'Si'], [[0,0,0.1],[0,0,0.2]])
s2 = Structure(l, ['Si', 'Si'], [[0,0.1,0],[0,0.1,-0.95]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell = False,
use_rms = True, break_on_match = False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
s2.translate_sites(range(len(s2)), match[3])
self.assertAlmostEqual(np.sum(s2.frac_coords), 0.3)
self.assertAlmostEqual(np.sum(s2.frac_coords[:,:2]), 0)
def test_supercell_subsets(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True, allow_subset=True,
supercell_size='volume')
sm_no_s = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True, allow_subset=False,
supercell_size='volume')
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ['Ag', 'Si', 'Si'],
[[.7,.4,.5],[0,0,0.1],[0,0,0.2]])
s1.make_supercell([2,1,1])
s2 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0.1,-0.95],[0,0.1,0],[-.7,.5,.375]])
shuffle = [0,2,1,3,4,5]
s1 = Structure.from_sites([s1[i] for i in shuffle])
#test when s1 is exact supercell of s2
result = sm.get_s2_like_s1(s1, s2)
for a, b in zip(s1, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species_and_occu, b.species_and_occu)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
self.assertTrue(sm_no_s.fit(s1, s2))
self.assertTrue(sm_no_s.fit(s2, s1))
rms = (0.048604032430991401, 0.059527539448807391)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))
#test when the supercell is a subset of s2
subset_supercell = s1.copy()
del subset_supercell[0]
result = sm.get_s2_like_s1(subset_supercell, s2)
self.assertEqual(len(result), 6)
for a, b in zip(subset_supercell, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species_and_occu, b.species_and_occu)
self.assertTrue(sm.fit(subset_supercell, s2))
self.assertTrue(sm.fit(s2, subset_supercell))
self.assertFalse(sm_no_s.fit(subset_supercell, s2))
self.assertFalse(sm_no_s.fit(s2, subset_supercell))
rms = (0.053243049896333279, 0.059527539448807336)
self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))
#test when s2 (once made a supercell) is a subset of s1
s2_missing_site = s2.copy()
del s2_missing_site[1]
result = sm.get_s2_like_s1(s1, s2_missing_site)
for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species_and_occu, b.species_and_occu)
self.assertTrue(sm.fit(s1, s2_missing_site))
self.assertTrue(sm.fit(s2_missing_site, s1))
self.assertFalse(sm_no_s.fit(s1, s2_missing_site))
self.assertFalse(sm_no_s.fit(s2_missing_site, s1))
rms = (0.029763769724403633, 0.029763769724403987)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))
def test_get_s2_large_s2(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=False,
attempt_supercell=True, allow_subset=False,
supercell_size='volume')
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ['Ag', 'Si', 'Si'],
[[.7,.4,.5],[0,0,0.1],[0,0,0.2]])
l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)
s2 = Structure(l2, ['Si', 'Si', 'Ag'],
[[0,0.1,-0.95],[0,0.1,0],[-.7,.5,.375]])
s2.make_supercell([[0,-1,0],[1,0,0],[0,0,1]])
result = sm.get_s2_like_s1(s1, s2)
for x,y in zip(s1, result):
self.assertLess(x.distance(y), 0.08)
def test_get_mapping(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=False,
allow_subset = True)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ['Ag', 'Si', 'Si'],
[[.7,.4,.5],[0,0,0.1],[0,0,0.2]])
s1.make_supercell([2,1,1])
s2 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0.1,-0.95],[0,0.1,0],[-.7,.5,.375]])
shuffle = [2,0,1,3,5,4]
s1 = Structure.from_sites([s1[i] for i in shuffle])
#test the mapping
s2.make_supercell([2,1,1])
#equal sizes
for i, x in enumerate(sm.get_mapping(s1, s2)):
self.assertEqual(s1[x].species_and_occu,
s2[i].species_and_occu)
del s1[0]
#s1 is subset of s2
for i, x in enumerate(sm.get_mapping(s2, s1)):
self.assertEqual(s1[i].species_and_occu,
s2[x].species_and_occu)
#s2 is smaller than s1
del s2[0]
del s2[1]
self.assertRaises(ValueError, sm.get_mapping, s2, s1)
def test_get_supercell_matrix(self):
sm = StructureMatcher(ltol=0.1, stol=0.3, angle_tol=2,
primitive_cell=False, scale=True,
attempt_supercell=True)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0,0.1],[0,0,0.2],[.7,.4,.5]])
s1.make_supercell([2,1,1])
s2 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0.1,0],[0,0.1,-0.95],[-.7,.5,.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-2,0,0],[0,1,0],[0,0,1]]).all())
s1 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0,0.1],[0,0,0.2],[.7,.4,.5]])
s1.make_supercell([[1, -1, 0],[0, 0, -1],[0, 1, 0]])
s2 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0.1,0],[0,0.1,-0.95],[-.7,.5,.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1,-1,0],[0,0,-1],[0,1,0]]).all())
#test when the supercell is a subset
sm = StructureMatcher(ltol=0.1, stol=0.3, angle_tol=2,
primitive_cell=False, scale=True,
attempt_supercell=True, allow_subset=True)
del s1[0]
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1,-1,0],[0,0,-1],[0,1,0]]).all())
def test_subset(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=False,
allow_subset=True)
l = Lattice.orthorhombic(10, 20, 30)
s1 = Structure(l, ['Si', 'Si', 'Ag'],
[[0,0,0.1],[0,0,0.2],[.7,.4,.5]])
s2 = Structure(l, ['Si', 'Ag'],
[[0,0.1,0],[-.7,.5,.4]])
result = sm.get_s2_like_s1(s1, s2)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,
[0,0,0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,
[0.7,0.4,0.5])), 1)
#test with fewer species in s2
s1 = Structure(l, ['Si', 'Ag', 'Si'],
[[0,0,0.1],[0,0,0.2],[.7,.4,.5]])
s2 = Structure(l, ['Si', 'Si'],
[[0,0.1,0],[-.7,.5,.4]])
result = sm.get_s2_like_s1(s1, s2)
mindists = np.min(s1.lattice.get_all_distances(
s1.frac_coords, result.frac_coords), axis=0)
self.assertLess(np.max(mindists), 1e-6)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,
[0,0,0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,
[0.7,0.4,0.5])), 1)
        # test with not enough matching sites in s1
s1 = Structure(l, ['Si', 'Ag', 'Cl'],
[[0,0,0.1],[0,0,0.2],[.7,.4,.5]])
s2 = Structure(l, ['Si', 'Si'],
[[0,0.1,0],[-.7,.5,.4]])
self.assertEqual(sm.get_s2_like_s1(s1, s2), None)
def test_out_of_cell_s2_like_s1(self):
l = Lattice.cubic(5)
s1 = Structure(l, ['Si', 'Ag', 'Si'],
[[0,0,-0.02],[0,0,0.001],[.7,.4,.5]])
s2 = Structure(l, ['Si', 'Ag', 'Si'],
[[0,0,0.98],[0,0,0.99],[.7,.4,.5]])
new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)
dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2, axis=-1) ** 0.5
self.assertLess(np.max(dists), 0.1)
def test_disordered_primitive_to_ordered_supercell(self):
sm_atoms = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size = 'num_atoms',
comparator=OrderDisorderElementComparator())
sm_sites = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size = 'num_sites',
comparator=OrderDisorderElementComparator())
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0],
[0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20,20,30)
scoords = [[0, 0, 0],
[0.75, 0.5, 0.5]]
prim = Structure(lp, [{'Na':0.5}, {'Cl':0.5}], pcoords)
supercell = Structure(ls, ['Na', 'Cl'], scoords)
supercell.make_supercell([[-1,1,0],[0,1,1],[1,0,0]])
self.assertFalse(sm_sites.fit(prim, supercell))
self.assertTrue(sm_atoms.fit(prim, supercell))
self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)
self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)
def test_ordered_primitive_to_disordered_supercell(self):
sm_atoms = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size = 'num_atoms',
comparator=OrderDisorderElementComparator())
sm_sites = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size = 'num_sites',
comparator=OrderDisorderElementComparator())
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0],
[0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20,20,30)
scoords = [[0, 0, 0],
[0.5, 0, 0],
[0.25, 0.5, 0.5],
[0.75, 0.5, 0.5]]
s1 = Structure(lp, ['Na', 'Cl'], pcoords)
s2 = Structure(ls, [{'Na':0.5}, {'Na':0.5}, {'Cl':0.5}, {'Cl':0.5}], scoords)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_atoms.fit(s1, s2))
def test_occupancy_comparator(self):
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0],
[0.5, 0.5, 0.5]]
s1 = Structure(lp, [{'Na':0.6, 'K':0.4}, 'Cl'], pcoords)
s2 = Structure(lp, [{'Xa':0.4, 'Xb':0.6}, 'Cl'], pcoords)
s3 = Structure(lp, [{'Xa':0.5, 'Xb':0.5}, 'Cl'], pcoords)
sm_sites = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,
primitive_cell=False, scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size = 'num_sites',
comparator=OccupancyComparator())
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_sites.fit(s1, s3))
def test_electronegativity(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
s1 = Structure.from_file(os.path.join(test_dir, "Na2Fe2PAsO4S4.json"))
s2 = Structure.from_file(os.path.join(test_dir, "Na2Fe2PNO4Se4.json"))
self.assertEqual(sm.get_best_electronegativity_anonymous_mapping(s1, s2),
{Element('S'): Element('Se'),
Element('As'): Element('N'),
Element('Fe'): Element('Fe'),
Element('Na'): Element('Na'),
Element('P'): Element('P'),
Element('O'): Element('O'),})
self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)
# test include_dist
dists = {Element('N'): 0, Element('P'): 0.0010725064}
for mapping, d in sm.get_all_anonymous_mappings(s1, s2, include_dist=True):
self.assertAlmostEqual(dists[mapping[Element('As')]], d)
def test_rms_vs_minimax(self):
# This tests that structures with adjusted RMS less than stol, but minimax
# greater than stol are treated properly
# stol=0.3 gives exactly an ftol of 0.1 on the c axis
sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1, primitive_cell=False)
l = Lattice.orthorhombic(1, 2, 12)
sp = ["Si", "Si", "Al"]
s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])
s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])
self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2),
(0.32 ** 0.5 / 2, 0.4))
self.assertEqual(sm.fit(s1, s2), False)
self.assertEqual(sm.fit_anonymous(s1, s2), False)
self.assertEqual(sm.get_mapping(s1, s2), None)
if __name__ == '__main__':
unittest.main()
| mit | 7,587,811,550,906,117,000 | 42.871012 | 105 | 0.513167 | false |
chrisrico/python-trezor | trezorlib/protobuf_json.py | 7 | 5081 | # JSON serialization support for Google's protobuf Messages
# Copyright (c) 2009, Paul Dovbush
# All rights reserved.
# http://code.google.com/p/protobuf-json/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Provide serialization and de-serialization of Google's protobuf Messages into/from JSON format.
'''
# groups are deprecated and not supported;
# Note that preservation of unknown fields is currently not available for Python (c) google docs
# extensions is not supported from 0.0.5 (due to gpb2.3 changes)
__version__='0.0.5'
__author__='Paul Dovbush <[email protected]>'
import json
from google.protobuf.descriptor import FieldDescriptor as FD
import binascii
import types_pb2 as types
class ParseError(Exception): pass
def json2pb(pb, js):
''' convert JSON string to google.protobuf.descriptor instance '''
for field in pb.DESCRIPTOR.fields:
if field.name not in js:
continue
if field.type == FD.TYPE_MESSAGE:
pass
elif field.type in _js2ftype:
ftype = _js2ftype[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
value = js[field.name]
if field.label == FD.LABEL_REPEATED:
pb_value = getattr(pb, field.name, None)
for v in value:
if field.type == FD.TYPE_MESSAGE:
json2pb(pb_value.add(), v)
else:
pb_value.append(ftype(v))
else:
if field.type == FD.TYPE_MESSAGE:
json2pb(getattr(pb, field.name, None), value)
else:
setattr(pb, field.name, ftype(value))
return pb
def pb2json(pb):
''' convert google.protobuf.descriptor instance to JSON string '''
js = {}
# fields = pb.DESCRIPTOR.fields #all fields
fields = pb.ListFields() #only filled (including extensions)
for field,value in fields:
if field.type == FD.TYPE_MESSAGE:
ftype = pb2json
elif field.type == FD.TYPE_ENUM:
ftype = lambda x: field.enum_type.values[x].name
elif field.type in _ftype2js:
ftype = _ftype2js[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
if field.label == FD.LABEL_REPEATED:
js_value = []
for v in value:
js_value.append(ftype(v))
else:
js_value = ftype(value)
js[field.name] = js_value
return js
_ftype2js = {
FD.TYPE_DOUBLE: float,
FD.TYPE_FLOAT: float,
FD.TYPE_INT64: long,
FD.TYPE_UINT64: long,
FD.TYPE_INT32: int,
FD.TYPE_FIXED64: float,
FD.TYPE_FIXED32: float,
FD.TYPE_BOOL: bool,
FD.TYPE_STRING: unicode,
#FD.TYPE_MESSAGE handled specially
FD.TYPE_BYTES: lambda x: binascii.hexlify(x),
FD.TYPE_UINT32: int,
# FD.TYPE_ENUM: handled specially
FD.TYPE_SFIXED32: float,
FD.TYPE_SFIXED64: float,
FD.TYPE_SINT32: int,
FD.TYPE_SINT64: long,
}
_js2ftype = {
FD.TYPE_DOUBLE: float,
FD.TYPE_FLOAT: float,
FD.TYPE_INT64: long,
FD.TYPE_UINT64: long,
FD.TYPE_INT32: int,
FD.TYPE_FIXED64: float,
FD.TYPE_FIXED32: float,
FD.TYPE_BOOL: bool,
FD.TYPE_STRING: unicode,
# FD.TYPE_MESSAGE handled specially
FD.TYPE_BYTES: lambda x: binascii.unhexlify(x),
FD.TYPE_UINT32: int,
FD.TYPE_ENUM: lambda x: getattr(types, x),
FD.TYPE_SFIXED32: float,
FD.TYPE_SFIXED64: float,
FD.TYPE_SINT32: int,
FD.TYPE_SINT64: long,
}
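if __name__ == '__main__':
    # Round-trip sketch (editor addition, not part of the original module).
    # `example_pb2.Example` is a hypothetical generated message class with a
    # single string field named `label`; substitute any real generated class
    # (for instance one of trezorlib's message types) to try this out.
    from example_pb2 import Example  # hypothetical generated module
    msg = json2pb(Example(), {'label': u'my trezor'})
    print(pb2json(msg))  # -> {'label': u'my trezor'}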
| lgpl-3.0 | -1,736,151,721,899,683,600 | 34.78169 | 125 | 0.660697 | false |
satdreamgr/enigma2 | lib/python/Components/FileList.py | 16 | 14841 | import os
import re
from MenuList import MenuList
from Components.Harddisk import harddiskmanager
from Tools.Directories import SCOPE_CURRENT_SKIN, resolveFilename, fileExists
from enigma import RT_HALIGN_LEFT, eListboxPythonMultiContent, \
eServiceReference, eServiceCenter, gFont
from Tools.LoadPixmap import LoadPixmap
import skin
EXTENSIONS = {
"m4a": "music",
"mp2": "music",
"mp3": "music",
"wav": "music",
"ogg": "music",
"wma": "music",
"flac": "music",
"jpg": "picture",
"jpeg": "picture",
"png": "picture",
"bmp": "picture",
"ts": "movie",
"avi": "movie",
"divx": "movie",
"m4v": "movie",
"mpg": "movie",
"mpeg": "movie",
"mkv": "movie",
"mp4": "movie",
"mov": "movie",
"m2ts": "movie",
"3gp": "movie",
"3g2": "movie",
"asf": "movie",
"wmv": "movie",
}
def FileEntryComponent(name, absolute = None, isDir = False):
res = [ (absolute, isDir) ]
x, y, w, h = skin.parameters.get("FileListName",(35, 1, 470, 20))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_HALIGN_LEFT, name))
if isDir:
png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "extensions/directory.png"))
else:
extension = name.split('.')
extension = extension[-1].lower()
if EXTENSIONS.has_key(extension):
png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "extensions/" + EXTENSIONS[extension] + ".png"))
else:
png = None
if png is not None:
x, y, w, h = skin.parameters.get("FileListIcon",(10, 2, 20, 20))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, png))
return res
class FileList(MenuList):
def __init__(self, directory, showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = None, useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.additional_extensions = additionalExtensions
self.mountpoints = []
self.current_directory = None
self.current_mountpoint = None
self.useServiceRef = useServiceRef
self.showDirectories = showDirectories
self.showMountpoints = showMountpoints
self.showFiles = showFiles
if isTop:
self.topDirectory = directory
else:
self.topDirectory = "/"
# example: matching .nfi and .ts files: "^.*\.(nfi|ts)"
if matchingPattern:
self.matchingPattern = re.compile(matchingPattern)
else:
self.matchingPattern = None
self.inhibitDirs = inhibitDirs or []
self.inhibitMounts = inhibitMounts or []
self.refreshMountpoints()
self.changeDir(directory)
font = skin.fonts.get("FileList", ("Regular", 18, 23))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.serviceHandler = eServiceCenter.getInstance()
def refreshMountpoints(self):
self.mountpoints = [os.path.join(p.mountpoint, "") for p in harddiskmanager.getMountedPartitions()]
self.mountpoints.sort(reverse = True)
def getMountpoint(self, file):
file = os.path.join(os.path.realpath(file), "")
for m in self.mountpoints:
if file.startswith(m):
return m
return False
def getMountpointLink(self, file):
if os.path.realpath(file) == file:
return self.getMountpoint(file)
else:
if file[-1] == "/":
file = file[:-1]
mp = self.getMountpoint(file)
last = file
file = os.path.dirname(file)
while last != "/" and mp == self.getMountpoint(file):
last = file
file = os.path.dirname(file)
return os.path.join(last, "")
def getSelection(self):
if self.l.getCurrentSelection() is None:
return None
return self.l.getCurrentSelection()[0]
def getCurrentEvent(self):
l = self.l.getCurrentSelection()
if not l or l[0][1] == True:
return None
else:
return self.serviceHandler.info(l[0][0]).getEvent(l[0][0])
def getFileList(self):
return self.list
def inParentDirs(self, dir, parents):
dir = os.path.realpath(dir)
for p in parents:
if dir.startswith(p):
return True
return False
def changeDir(self, directory, select = None):
self.list = []
# if we are just entering from the list of mount points:
if self.current_directory is None:
if directory and self.showMountpoints:
self.current_mountpoint = self.getMountpointLink(directory)
else:
self.current_mountpoint = None
self.current_directory = directory
directories = []
files = []
if directory is None and self.showMountpoints: # present available mountpoints
for p in harddiskmanager.getMountedPartitions():
path = os.path.join(p.mountpoint, "")
if path not in self.inhibitMounts and not self.inParentDirs(path, self.inhibitDirs):
self.list.append(FileEntryComponent(name = p.description, absolute = path, isDir = True))
files = [ ]
directories = [ ]
elif directory is None:
files = [ ]
directories = [ ]
elif self.useServiceRef:
# we should not use the 'eServiceReference(string)' constructor, because it doesn't allow ':' in the directoryname
root = eServiceReference(2, 0, directory)
if self.additional_extensions:
root.setName(self.additional_extensions)
serviceHandler = eServiceCenter.getInstance()
list = serviceHandler.list(root)
while 1:
s = list.getNext()
if not s.valid():
del list
break
if s.flags & s.mustDescent:
directories.append(s.getPath())
else:
files.append(s)
directories.sort()
files.sort()
else:
if fileExists(directory):
try:
files = os.listdir(directory)
except:
files = []
files.sort()
tmpfiles = files[:]
for x in tmpfiles:
if os.path.isdir(directory + x):
directories.append(directory + x + "/")
files.remove(x)
if self.showDirectories:
if directory:
if self.showMountpoints and directory == self.current_mountpoint:
self.list.append(FileEntryComponent(name = "<" +_("List of storage devices") + ">", absolute = None, isDir = True))
elif (directory != self.topDirectory) and not (self.inhibitMounts and self.getMountpoint(directory) in self.inhibitMounts):
self.list.append(FileEntryComponent(name = "<" +_("Parent directory") + ">", absolute = '/'.join(directory.split('/')[:-2]) + '/', isDir = True))
for x in directories:
if not (self.inhibitMounts and self.getMountpoint(x) in self.inhibitMounts) and not self.inParentDirs(x, self.inhibitDirs):
name = x.split('/')[-2]
self.list.append(FileEntryComponent(name = name, absolute = x, isDir = True))
if self.showFiles:
for x in files:
if self.useServiceRef:
path = x.getPath()
name = path.split('/')[-1]
else:
path = directory + x
name = x
if (self.matchingPattern is None) or self.matchingPattern.search(path):
self.list.append(FileEntryComponent(name = name, absolute = x , isDir = False))
if self.showMountpoints and len(self.list) == 0:
self.list.append(FileEntryComponent(name = _("nothing connected"), absolute = None, isDir = False))
self.l.setList(self.list)
if select is not None:
i = 0
self.moveToIndex(0)
for x in self.list:
p = x[0][0]
if isinstance(p, eServiceReference):
p = p.getPath()
if p == select:
self.moveToIndex(i)
i += 1
def getCurrentDirectory(self):
return self.current_directory
def canDescent(self):
if self.getSelection() is None:
return False
return self.getSelection()[1]
def descent(self):
if self.getSelection() is None:
return
self.changeDir(self.getSelection()[0], select = self.current_directory)
def getFilename(self):
if self.getSelection() is None:
return None
x = self.getSelection()[0]
if isinstance(x, eServiceReference):
x = x.getPath()
return x
def getServiceRef(self):
if self.getSelection() is None:
return None
x = self.getSelection()[0]
if isinstance(x, eServiceReference):
return x
return None
def execBegin(self):
harddiskmanager.on_partition_list_change.append(self.partitionListChanged)
def execEnd(self):
harddiskmanager.on_partition_list_change.remove(self.partitionListChanged)
def refresh(self):
self.changeDir(self.current_directory, self.getFilename())
def partitionListChanged(self, action, device):
self.refreshMountpoints()
if self.current_directory is None:
self.refresh()
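# Illustrative usage (editor addition, not part of the original module): a
# screen typically instantiates FileList with a start directory and a pattern
# of interesting extensions, then drives it from its key handlers, e.g.
#
#     filelist = FileList("/media/hdd/movie/", matchingPattern="^.*\.(ts|mkv|mp4)",
#                         showMountpoints=False)
#     if filelist.canDescent():
#         filelist.descent()              # enter the highlighted directory
#     else:
#         entry = filelist.getFilename()  # highlighted file entry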
def MultiFileSelectEntryComponent(name, absolute = None, isDir = False, selected = False):
res = [ (absolute, isDir, selected, name) ]
x, y, w, h = skin.parameters.get("FileListMultiName",(55, 0, 470, 25))
res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_HALIGN_LEFT, name))
if isDir:
png = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "extensions/directory.png"))
else:
extension = name.split('.')
extension = extension[-1].lower()
if EXTENSIONS.has_key(extension):
png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "extensions/" + EXTENSIONS[extension] + ".png"))
else:
png = None
if png is not None:
x, y, w, h = skin.parameters.get("FileListMultiIcon",(30, 2, 20, 20))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, png))
if not name.startswith('<'):
if selected:
icon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_on.png"))
else:
icon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_off.png"))
x, y, w, h = skin.parameters.get("FileListMultiLock",(2, 0, 25, 25))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, icon))
return res
class MultiFileSelectList(FileList):
def __init__(self, preselectedFiles, directory, showMountpoints = False, matchingPattern = None, showDirectories = True, showFiles = True, useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
if preselectedFiles is None:
self.selectedFiles = []
else:
self.selectedFiles = preselectedFiles
FileList.__init__(self, directory, showMountpoints = showMountpoints, matchingPattern = matchingPattern, showDirectories = showDirectories, showFiles = showFiles, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions)
self.changeDir(directory)
font = skin.fonts.get("FileListMulti", ("Regular", 20, 25))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.onSelectionChanged = [ ]
def selectionChanged(self):
for f in self.onSelectionChanged:
f()
def changeSelectionState(self):
idx = self.l.getCurrentSelectionIndex()
newList = self.list[:]
x = self.list[idx]
if not x[0][3].startswith('<'):
if x[0][1] is True:
realPathname = x[0][0]
else:
realPathname = self.current_directory + x[0][0]
if x[0][2] == True:
SelectState = False
try:
self.selectedFiles.remove(realPathname)
except:
try:
self.selectedFiles.remove(os.path.normpath(realPathname))
except:
print "Couldn't remove:", realPathname
else:
SelectState = True
if (realPathname not in self.selectedFiles) and (os.path.normpath(realPathname) not in self.selectedFiles):
self.selectedFiles.append(realPathname)
newList[idx] = MultiFileSelectEntryComponent(name = x[0][3], absolute = x[0][0], isDir = x[0][1], selected = SelectState)
self.list = newList
self.l.setList(self.list)
def getSelectedList(self):
return self.selectedFiles
def changeDir(self, directory, select = None):
self.list = []
# if we are just entering from the list of mount points:
if self.current_directory is None:
if directory and self.showMountpoints:
self.current_mountpoint = self.getMountpointLink(directory)
else:
self.current_mountpoint = None
self.current_directory = directory
directories = []
files = []
if directory is None and self.showMountpoints: # present available mountpoints
for p in harddiskmanager.getMountedPartitions():
path = os.path.join(p.mountpoint, "")
if path not in self.inhibitMounts and not self.inParentDirs(path, self.inhibitDirs):
self.list.append(MultiFileSelectEntryComponent(name = p.description, absolute = path, isDir = True))
files = [ ]
directories = [ ]
elif directory is None:
files = [ ]
directories = [ ]
elif self.useServiceRef:
root = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + directory)
if self.additional_extensions:
root.setName(self.additional_extensions)
serviceHandler = eServiceCenter.getInstance()
list = serviceHandler.list(root)
while 1:
s = list.getNext()
if not s.valid():
del list
break
if s.flags & s.mustDescent:
directories.append(s.getPath())
else:
files.append(s)
directories.sort()
files.sort()
else:
if fileExists(directory):
try:
files = os.listdir(directory)
except:
files = []
files.sort()
tmpfiles = files[:]
for x in tmpfiles:
if os.path.isdir(directory + x):
directories.append(directory + x + "/")
files.remove(x)
if self.showDirectories:
if directory:
if self.showMountpoints and directory == self.current_mountpoint:
self.list.append(FileEntryComponent(name = "<" +_("List of storage devices") + ">", absolute = None, isDir = True))
elif (directory != self.topDirectory) and not (self.inhibitMounts and self.getMountpoint(directory) in self.inhibitMounts):
self.list.append(FileEntryComponent(name = "<" +_("Parent directory") + ">", absolute = '/'.join(directory.split('/')[:-2]) + '/', isDir = True))
for x in directories:
if not (self.inhibitMounts and self.getMountpoint(x) in self.inhibitMounts) and not self.inParentDirs(x, self.inhibitDirs):
name = x.split('/')[-2]
alreadySelected = (x in self.selectedFiles) or (os.path.normpath(x) in self.selectedFiles)
self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x, isDir = True, selected = alreadySelected))
if self.showFiles:
for x in files:
if self.useServiceRef:
path = x.getPath()
name = path.split('/')[-1]
else:
path = directory + x
name = x
if (self.matchingPattern is None) or self.matchingPattern.search(path):
alreadySelected = False
for entry in self.selectedFiles:
if os.path.basename(entry) == x:
alreadySelected = True
self.list.append(MultiFileSelectEntryComponent(name = name, absolute = x , isDir = False, selected = alreadySelected))
self.l.setList(self.list)
if select is not None:
i = 0
self.moveToIndex(0)
for x in self.list:
p = x[0][0]
if isinstance(p, eServiceReference):
p = p.getPath()
if p == select:
self.moveToIndex(i)
i += 1
| gpl-2.0 | 1,395,567,340,161,607,400 | 32.653061 | 351 | 0.688902 | false |
blockc/fabric | test/feature/steps/config_util.py | 12 | 4171 | # Copyright IBM Corp. 2017 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import sys
from shutil import copyfile
CHANNEL_PROFILE = "SysTestChannel"
def generateConfig(channelID, profile, ordererProfile, projectName, block="orderer.block"):
# Save all the files to a specific directory for the test
testConfigs = "configs/%s" % projectName
if not os.path.isdir(testConfigs):
os.mkdir(testConfigs)
configFile = "configtx.yaml"
if os.path.isfile("configs/%s.yaml" % channelID):
configFile = "%s.yaml" % channelID
copyfile("configs/%s" % configFile, "%s/configtx.yaml" % testConfigs)
# Copy config to orderer org structures
for orgDir in os.listdir("./{0}/ordererOrganizations".format(testConfigs)):
copyfile("{0}/configtx.yaml".format(testConfigs),
"{0}/ordererOrganizations/{1}/msp/config.yaml".format(testConfigs,
orgDir))
# Copy config to peer org structures
for orgDir in os.listdir("./{0}/peerOrganizations".format(testConfigs)):
copyfile("{0}/configtx.yaml".format(testConfigs),
"{0}/peerOrganizations/{1}/msp/config.yaml".format(testConfigs,
orgDir))
copyfile("{0}/configtx.yaml".format(testConfigs),
"{0}/peerOrganizations/{1}/users/Admin@{1}/msp/config.yaml".format(testConfigs,
orgDir))
try:
command = ["configtxgen", "-profile", ordererProfile,
"-outputBlock", block,
"-channelID", channelID]
subprocess.check_call(command, cwd=testConfigs)
generateChannelConfig(channelID, profile, projectName)
generateChannelAnchorConfig(channelID, profile, projectName)
except:
print("Unable to generate channel config data: {0}".format(sys.exc_info()[1]))
def generateChannelConfig(channelID, profile, projectName):
testConfigs = "configs/%s" % projectName
try:
command = ["configtxgen", "-profile", profile,
"-outputCreateChannelTx", "%s.tx" % channelID,
"-channelID", channelID]
subprocess.check_call(command, cwd=testConfigs)
except:
print("Unable to generate channel config data: {0}".format(sys.exc_info()[1]))
def generateChannelAnchorConfig(channelID, profile, projectName):
testConfigs = "configs/%s" % projectName
for org in os.listdir("./{0}/peerOrganizations".format(testConfigs)):
try:
command = ["configtxgen", "-profile", profile,
"-outputAnchorPeersUpdate", "{0}{1}Anchor.tx".format(org, channelID),
"-channelID", channelID,
"-asOrg", org.title().replace('.', '')]
subprocess.check_call(command, cwd=testConfigs)
except:
print("Unable to generate channel anchor config data: {0}".format(sys.exc_info()[1]))
def generateCrypto(projectName):
# Save all the files to a specific directory for the test
testConfigs = "configs/%s" % projectName
if not os.path.isdir(testConfigs):
os.mkdir(testConfigs)
try:
subprocess.check_call(["cryptogen", "generate",
'--output={0}'.format(testConfigs),
'--config=./configs/crypto.yaml'],
env=os.environ)
except:
print("Unable to generate crypto material: {0}".format(sys.exc_info()[1]))
| apache-2.0 | -5,130,327,849,694,396,000 | 43.849462 | 97 | 0.612803 | false |
jrabbit/compose | tests/acceptance/cli_test.py | 1 | 96756 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import json
import os
import os.path
import re
import signal
import subprocess
import time
from collections import Counter
from collections import namedtuple
from operator import attrgetter
import pytest
import six
import yaml
from docker import errors
from .. import mock
from ..helpers import create_host_file
from compose.cli.command import get_project
from compose.config.errors import DuplicateOverrideFileFound
from compose.container import Container
from compose.project import OneOffFilter
from compose.utils import nanoseconds_from_time_seconds
from tests.integration.testcases import DockerClientTestCase
from tests.integration.testcases import get_links
from tests.integration.testcases import is_cluster
from tests.integration.testcases import no_cluster
from tests.integration.testcases import pull_busybox
from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
from tests.integration.testcases import v2_1_only
from tests.integration.testcases import v2_2_only
from tests.integration.testcases import v2_only
from tests.integration.testcases import v3_only
ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
def start_process(base_dir, options):
proc = subprocess.Popen(
['docker-compose'] + options,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=base_dir)
print("Running process: %s" % proc.pid)
return proc
def wait_on_process(proc, returncode=0):
stdout, stderr = proc.communicate()
if proc.returncode != returncode:
print("Stderr: {}".format(stderr))
print("Stdout: {}".format(stdout))
assert proc.returncode == returncode
return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
def wait_on_condition(condition, delay=0.1, timeout=40):
start_time = time.time()
while not condition():
if time.time() - start_time > timeout:
raise AssertionError("Timeout: %s" % condition)
time.sleep(delay)
def kill_service(service):
for container in service.containers():
if container.is_running:
container.kill()
class ContainerCountCondition(object):
def __init__(self, project, expected):
self.project = project
self.expected = expected
def __call__(self):
return len([c for c in self.project.containers() if c.is_running]) == self.expected
def __str__(self):
return "waiting for counter count == %s" % self.expected
class ContainerStateCondition(object):
def __init__(self, client, name, status):
self.client = client
self.name = name
self.status = status
def __call__(self):
try:
container = self.client.inspect_container(self.name)
return container['State']['Status'] == self.status
except errors.APIError:
return False
def __str__(self):
return "waiting for container to be %s" % self.status
class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.base_dir = 'tests/fixtures/simple-composefile'
self.override_dir = None
def tearDown(self):
if self.base_dir:
self.project.kill()
self.project.down(None, True)
for container in self.project.containers(stopped=True, one_off=OneOffFilter.only):
container.remove(force=True)
networks = self.client.networks()
for n in networks:
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
self.client.remove_network(n['Name'])
volumes = self.client.volumes().get('Volumes') or []
for v in volumes:
if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
self.client.remove_volume(v['Name'])
if hasattr(self, '_project'):
del self._project
super(CLITestCase, self).tearDown()
@property
def project(self):
# Hack: allow project to be overridden
if not hasattr(self, '_project'):
self._project = get_project(self.base_dir, override_dir=self.override_dir)
return self._project
def dispatch(self, options, project_options=None, returncode=0):
project_options = project_options or []
proc = start_process(self.base_dir, project_options + options)
return wait_on_process(proc, returncode=returncode)
def execute(self, container, cmd):
# Remove once Hijack and CloseNotifier sign a peace treaty
self.client.close()
exc = self.client.exec_create(container.id, cmd)
self.client.exec_start(exc)
return self.client.exec_inspect(exc)['ExitCode']
def lookup(self, container, hostname):
return self.execute(container, ["nslookup", hostname]) == 0
def test_help(self):
self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'up'], returncode=0)
assert 'Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]' in result.stdout
# Prevent tearDown from trying to create a project
self.base_dir = None
def test_help_nonexistent(self):
self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'foobar'], returncode=1)
assert 'No such command' in result.stderr
self.base_dir = None
def test_shorthand_host_opt(self):
self.dispatch(
['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
'up', '-d'],
returncode=0
)
def test_host_not_reachable(self):
result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
assert "Couldn't connect to Docker daemon" in result.stderr
def test_host_not_reachable_volumes_from_container(self):
self.base_dir = 'tests/fixtures/volumes-from-container'
container = self.client.create_container(
'busybox', 'true', name='composetest_data_container',
host_config={}
)
self.addCleanup(self.client.remove_container, container)
result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
assert "Couldn't connect to Docker daemon" in result.stderr
def test_config_list_services(self):
self.base_dir = 'tests/fixtures/v2-full'
result = self.dispatch(['config', '--services'])
assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}
def test_config_list_volumes(self):
self.base_dir = 'tests/fixtures/v2-full'
result = self.dispatch(['config', '--volumes'])
assert set(result.stdout.rstrip().split('\n')) == {'data'}
def test_config_quiet_with_error(self):
self.base_dir = None
result = self.dispatch([
'-f', 'tests/fixtures/invalid-composefile/invalid.yml',
'config', '-q'
], returncode=1)
assert "'notaservice' must be a mapping" in result.stderr
def test_config_quiet(self):
self.base_dir = 'tests/fixtures/v2-full'
assert self.dispatch(['config', '-q']).stdout == ''
def test_config_default(self):
self.base_dir = 'tests/fixtures/v2-full'
result = self.dispatch(['config'])
# assert there are no python objects encoded in the output
assert '!!' not in result.stdout
output = yaml.load(result.stdout)
expected = {
'version': '2.0',
'volumes': {'data': {'driver': 'local'}},
'networks': {'front': {}},
'services': {
'web': {
'build': {
'context': os.path.abspath(self.base_dir),
},
'networks': {'front': None, 'default': None},
'volumes_from': ['service:other:rw'],
},
'other': {
'image': 'busybox:latest',
'command': 'top',
'volumes': ['/data'],
},
},
}
assert output == expected
def test_config_restart(self):
self.base_dir = 'tests/fixtures/restart'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
'version': '2.0',
'services': {
'never': {
'image': 'busybox',
'restart': 'no',
},
'always': {
'image': 'busybox',
'restart': 'always',
},
'on-failure': {
'image': 'busybox',
'restart': 'on-failure',
},
'on-failure-5': {
'image': 'busybox',
'restart': 'on-failure:5',
},
'restart-null': {
'image': 'busybox',
'restart': ''
},
},
}
def test_config_external_network(self):
self.base_dir = 'tests/fixtures/networks'
result = self.dispatch(['-f', 'external-networks.yml', 'config'])
json_result = yaml.load(result.stdout)
assert 'networks' in json_result
assert json_result['networks'] == {
'networks_foo': {
'external': True # {'name': 'networks_foo'}
},
'bar': {
'external': {'name': 'networks_bar'}
}
}
def test_config_external_volume_v2(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
json_result = yaml.load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
'external': True,
},
'bar': {
'external': {
'name': 'some_bar',
},
}
}
def test_config_external_volume_v2_x(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
json_result = yaml.load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
'external': True,
'name': 'some_foo',
},
'bar': {
'external': True,
'name': 'some_bar',
}
}
def test_config_external_volume_v3_x(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
json_result = yaml.load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
'external': True,
},
'bar': {
'external': {
'name': 'some_bar',
},
}
}
def test_config_external_volume_v3_4(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
json_result = yaml.load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
'external': True,
'name': 'some_foo',
},
'bar': {
'external': True,
'name': 'some_bar',
}
}
def test_config_external_network_v3_5(self):
self.base_dir = 'tests/fixtures/networks'
result = self.dispatch(['-f', 'external-networks-v3-5.yml', 'config'])
json_result = yaml.load(result.stdout)
assert 'networks' in json_result
assert json_result['networks'] == {
'foo': {
'external': True,
'name': 'some_foo',
},
'bar': {
'external': True,
'name': 'some_bar',
},
}
def test_config_v1(self):
self.base_dir = 'tests/fixtures/v1-config'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
'version': '2.1',
'services': {
'net': {
'image': 'busybox',
'network_mode': 'bridge',
},
'volume': {
'image': 'busybox',
'volumes': ['/data'],
'network_mode': 'bridge',
},
'app': {
'image': 'busybox',
'volumes_from': ['service:volume:rw'],
'network_mode': 'service:net',
},
},
}
@v3_only()
def test_config_v3(self):
self.base_dir = 'tests/fixtures/v3-full'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
'version': '3.2',
'volumes': {
'foobar': {
'labels': {
'com.docker.compose.test': 'true',
},
},
},
'services': {
'web': {
'image': 'busybox',
'deploy': {
'mode': 'replicated',
'replicas': 6,
'labels': ['FOO=BAR'],
'update_config': {
'parallelism': 3,
'delay': '10s',
'failure_action': 'continue',
'monitor': '60s',
'max_failure_ratio': 0.3,
},
'resources': {
'limits': {
'cpus': '0.001',
'memory': '50M',
},
'reservations': {
'cpus': '0.0001',
'memory': '20M',
},
},
'restart_policy': {
'condition': 'on_failure',
'delay': '5s',
'max_attempts': 3,
'window': '120s',
},
'placement': {
'constraints': ['node=foo'],
},
},
'healthcheck': {
'test': 'cat /etc/passwd',
'interval': '10s',
'timeout': '1s',
'retries': 5,
},
'volumes': [{
'read_only': True,
'source': '/host/path',
'target': '/container/path',
'type': 'bind'
}, {
'source': 'foobar', 'target': '/container/volumepath', 'type': 'volume'
}, {
'target': '/anonymous', 'type': 'volume'
}, {
'source': 'foobar',
'target': '/container/volumepath2',
'type': 'volume',
'volume': {'nocopy': True}
}],
'stop_grace_period': '20s',
},
},
}
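    # Unless a test overrides base_dir, the project comes from the default fixture
    # set in setUp (a simple compose file, judging by the 'simplecomposefile_*'
    # container names asserted below).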
def test_ps(self):
self.project.get_service('simple').create_container()
result = self.dispatch(['ps'])
assert 'simplecomposefile_simple_1' in result.stdout
def test_ps_default_composefile(self):
self.base_dir = 'tests/fixtures/multiple-composefiles'
self.dispatch(['up', '-d'])
result = self.dispatch(['ps'])
self.assertIn('multiplecomposefiles_simple_1', result.stdout)
self.assertIn('multiplecomposefiles_another_1', result.stdout)
self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
def test_ps_alternate_composefile(self):
config_path = os.path.abspath(
'tests/fixtures/multiple-composefiles/compose2.yml')
self._project = get_project(self.base_dir, [config_path])
self.base_dir = 'tests/fixtures/multiple-composefiles'
self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
result = self.dispatch(['-f', 'compose2.yml', 'ps'])
self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
def test_pull(self):
result = self.dispatch(['pull'])
assert sorted(result.stderr.split('\n'))[1:] == [
'Pulling another (busybox:latest)...',
'Pulling simple (busybox:latest)...',
]
def test_pull_with_digest(self):
result = self.dispatch(['-f', 'digest.yml', 'pull'])
assert 'Pulling simple (busybox:latest)...' in result.stderr
assert ('Pulling digest (busybox@'
'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
'04ee8502d)...') in result.stderr
def test_pull_with_ignore_pull_failures(self):
result = self.dispatch([
'-f', 'ignore-pull-failures.yml',
'pull', '--ignore-pull-failures']
)
assert 'Pulling simple (busybox:latest)...' in result.stderr
assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
assert ('repository nonexisting-image not found' in result.stderr or
'image library/nonexisting-image:latest not found' in result.stderr or
'pull access denied for nonexisting-image' in result.stderr)
def test_pull_with_quiet(self):
assert self.dispatch(['pull', '--quiet']).stderr == ''
assert self.dispatch(['pull', '--quiet']).stdout == ''
def test_pull_with_parallel_failure(self):
result = self.dispatch([
'-f', 'ignore-pull-failures.yml', 'pull', '--parallel'],
returncode=1
)
self.assertRegexpMatches(result.stderr, re.compile('^Pulling simple', re.MULTILINE))
self.assertRegexpMatches(result.stderr, re.compile('^Pulling another', re.MULTILINE))
self.assertRegexpMatches(result.stderr,
re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE))
self.assertRegexpMatches(result.stderr,
re.compile('''^(ERROR: )?(b')?.* nonexisting-image''',
re.MULTILINE))
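    # BUILD_CACHE_TEXT and BUILD_PULL_TEXT are marker strings from `docker build`
    # output (defined elsewhere in this module) used to detect whether the build
    # cache was used and whether the base image was pulled.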
def test_build_plain(self):
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
result = self.dispatch(['build', 'simple'])
assert BUILD_PULL_TEXT not in result.stdout
def test_build_no_cache(self):
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
result = self.dispatch(['build', '--no-cache', 'simple'])
assert BUILD_CACHE_TEXT not in result.stdout
assert BUILD_PULL_TEXT not in result.stdout
def test_build_pull(self):
# Make sure we have the latest busybox already
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'], None)
result = self.dispatch(['build', '--pull', 'simple'])
if not is_cluster(self.client):
# If previous build happened on another node, cache won't be available
assert BUILD_CACHE_TEXT in result.stdout
assert BUILD_PULL_TEXT in result.stdout
def test_build_no_cache_pull(self):
# Make sure we have the latest busybox already
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
result = self.dispatch(['build', '--no-cache', '--pull', 'simple'])
assert BUILD_CACHE_TEXT not in result.stdout
assert BUILD_PULL_TEXT in result.stdout
@pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
def test_build_failed(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', 'simple'], returncode=1)
labels = ["com.docker.compose.test_failing_image=true"]
containers = [
Container.from_ps(self.project.client, c)
for c in self.project.client.containers(
all=True,
filters={"label": labels})
]
assert len(containers) == 1
@pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
def test_build_failed_forcerm(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
labels = ["com.docker.compose.test_failing_image=true"]
containers = [
Container.from_ps(self.project.client, c)
for c in self.project.client.containers(
all=True,
filters={"label": labels})
]
assert not containers
def test_build_shm_size_build_option(self):
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/build-shm-size'
result = self.dispatch(['build', '--no-cache'], None)
assert 'shm_size: 96' in result.stdout
def test_build_memory_build_option(self):
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/build-memory'
result = self.dispatch(['build', '--no-cache', '--memory', '96m', 'service'], None)
assert 'memory: 100663296' in result.stdout # 96 * 1024 * 1024
def test_bundle_with_digests(self):
self.base_dir = 'tests/fixtures/bundle-with-digests/'
tmpdir = pytest.ensuretemp('cli_test_bundle')
self.addCleanup(tmpdir.remove)
filename = str(tmpdir.join('example.dab'))
self.dispatch(['bundle', '--output', filename])
with open(filename, 'r') as fh:
bundle = json.load(fh)
assert bundle == {
'Version': '0.1',
'Services': {
'web': {
'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
'44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
'Networks': ['default'],
},
'redis': {
'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
'374b2b7392de1e7d77be26ef8f7b'),
'Networks': ['default'],
}
},
}
def test_build_override_dir(self):
self.base_dir = 'tests/fixtures/build-path-override-dir'
self.override_dir = os.path.abspath('tests/fixtures')
result = self.dispatch([
'--project-directory', self.override_dir,
'build'])
assert 'Successfully built' in result.stdout
def test_build_override_dir_invalid_path(self):
config_path = os.path.abspath('tests/fixtures/build-path-override-dir/docker-compose.yml')
result = self.dispatch([
'-f', config_path,
'build'], returncode=1)
assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr
def test_create(self):
self.dispatch(['create'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
service_containers = service.containers(stopped=True)
another_containers = another.containers(stopped=True)
assert len(service_containers) == 1
assert len(another_containers) == 1
assert not service_containers[0].is_running
assert not another_containers[0].is_running
def test_create_with_force_recreate(self):
self.dispatch(['create'], None)
service = self.project.get_service('simple')
service_containers = service.containers(stopped=True)
assert len(service_containers) == 1
assert not service_containers[0].is_running
old_ids = [c.id for c in service.containers(stopped=True)]
self.dispatch(['create', '--force-recreate'], None)
service_containers = service.containers(stopped=True)
assert len(service_containers) == 1
assert not service_containers[0].is_running
new_ids = [c.id for c in service_containers]
assert old_ids != new_ids
def test_create_with_no_recreate(self):
self.dispatch(['create'], None)
service = self.project.get_service('simple')
service_containers = service.containers(stopped=True)
assert len(service_containers) == 1
assert not service_containers[0].is_running
old_ids = [c.id for c in service.containers(stopped=True)]
self.dispatch(['create', '--no-recreate'], None)
service_containers = service.containers(stopped=True)
assert len(service_containers) == 1
assert not service_containers[0].is_running
new_ids = [c.id for c in service_containers]
assert old_ids == new_ids
def test_run_one_off_with_volume(self):
self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
self.dispatch([
'run',
'-v', '{}:/data'.format(volume_path),
'-e', 'constraint:node=={}'.format(node if node is not None else '*'),
'simple',
'test', '-f', '/data/example.txt'
], returncode=0)
service = self.project.get_service('simple')
container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
mount = container_data.get('Mounts')[0]
assert mount['Source'] == volume_path
assert mount['Destination'] == '/data'
assert mount['Type'] == 'bind'
def test_run_one_off_with_multiple_volumes(self):
self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
self.dispatch([
'run',
'-v', '{}:/data'.format(volume_path),
'-v', '{}:/data1'.format(volume_path),
'-e', 'constraint:node=={}'.format(node if node is not None else '*'),
'simple',
'test', '-f', '/data/example.txt'
], returncode=0)
self.dispatch([
'run',
'-v', '{}:/data'.format(volume_path),
'-v', '{}:/data1'.format(volume_path),
'-e', 'constraint:node=={}'.format(node if node is not None else '*'),
'simple',
            'test', '-f', '/data1/example.txt'
], returncode=0)
def test_run_one_off_with_volume_merge(self):
self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
self.dispatch([
'-f', 'docker-compose.merge.yml',
'run',
'-v', '{}:/data'.format(volume_path),
'-e', 'constraint:node=={}'.format(node if node is not None else '*'),
'simple',
'test', '-f', '/data/example.txt'
], returncode=0)
service = self.project.get_service('simple')
container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
mounts = container_data.get('Mounts')
assert len(mounts) == 2
config_mount = [m for m in mounts if m['Destination'] == '/data1'][0]
override_mount = [m for m in mounts if m['Destination'] == '/data'][0]
assert config_mount['Type'] == 'volume'
assert override_mount['Source'] == volume_path
assert override_mount['Type'] == 'bind'
def test_create_with_force_recreate_and_no_recreate(self):
self.dispatch(
['create', '--force-recreate', '--no-recreate'],
returncode=1)
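    # `down --rmi` only accepts 'all' or 'local'; any other value should be
    # rejected with a non-zero exit code.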
def test_down_invalid_rmi_flag(self):
result = self.dispatch(['down', '--rmi', 'bogus'], returncode=1)
assert '--rmi flag must be' in result.stderr
@v2_only()
def test_down(self):
self.base_dir = 'tests/fixtures/v2-full'
self.dispatch(['up', '-d'])
wait_on_condition(ContainerCountCondition(self.project, 2))
self.dispatch(['run', 'web', 'true'])
self.dispatch(['run', '-d', 'web', 'tail', '-f', '/dev/null'])
assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2
result = self.dispatch(['down', '--rmi=local', '--volumes'])
assert 'Stopping v2full_web_1' in result.stderr
assert 'Stopping v2full_other_1' in result.stderr
assert 'Stopping v2full_web_run_2' in result.stderr
assert 'Removing v2full_web_1' in result.stderr
assert 'Removing v2full_other_1' in result.stderr
assert 'Removing v2full_web_run_1' in result.stderr
assert 'Removing v2full_web_run_2' in result.stderr
assert 'Removing volume v2full_data' in result.stderr
assert 'Removing image v2full_web' in result.stderr
assert 'Removing image busybox' not in result.stderr
assert 'Removing network v2full_default' in result.stderr
assert 'Removing network v2full_front' in result.stderr
def test_down_timeout(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
        self.assertTrue(service.containers()[0].is_running)
        self.dispatch(['down', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_down_signal(self):
self.base_dir = 'tests/fixtures/stop-signal-composefile'
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.dispatch(['down', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_up_detached(self):
self.dispatch(['up', '-d'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
# Ensure containers don't have stdin and stdout connected in -d mode
container, = service.containers()
self.assertFalse(container.get('Config.AttachStderr'))
self.assertFalse(container.get('Config.AttachStdout'))
self.assertFalse(container.get('Config.AttachStdin'))
def test_up_attached(self):
self.base_dir = 'tests/fixtures/echo-services'
result = self.dispatch(['up', '--no-color'])
assert 'simple_1 | simple' in result.stdout
assert 'another_1 | another' in result.stdout
assert 'simple_1 exited with code 0' in result.stdout
assert 'another_1 exited with code 0' in result.stdout
@v2_only()
def test_up(self):
self.base_dir = 'tests/fixtures/v2-simple'
self.dispatch(['up', '-d'], None)
services = self.project.get_services()
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
self.assertEqual(len(networks), 1)
        expected_driver = 'overlay' if is_cluster(self.client) else 'bridge'
        assert networks[0]['Driver'] == expected_driver
assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
network = self.client.inspect_network(networks[0]['Id'])
for service in services:
containers = service.containers()
self.assertEqual(len(containers), 1)
container = containers[0]
self.assertIn(container.id, network['Containers'])
networks = container.get('NetworkSettings.Networks')
self.assertEqual(list(networks), [network['Name']])
self.assertEqual(
sorted(networks[network['Name']]['Aliases']),
sorted([service.name, container.short_id]))
for service in services:
assert self.lookup(container, service.name)
@v2_only()
def test_up_no_start(self):
self.base_dir = 'tests/fixtures/v2-full'
self.dispatch(['up', '--no-start'], None)
services = self.project.get_services()
default_network = self.project.networks.networks['default'].full_name
front_network = self.project.networks.networks['front'].full_name
networks = self.client.networks(names=[default_network, front_network])
assert len(networks) == 2
for service in services:
containers = service.containers(stopped=True)
assert len(containers) == 1
container = containers[0]
assert not container.is_running
assert container.get('State.Status') == 'created'
volumes = self.project.volumes.volumes
assert 'data' in volumes
volume = volumes['data']
# The code below is a Swarm-compatible equivalent to volume.exists()
remote_volumes = [
v for v in self.client.volumes().get('Volumes', [])
if v['Name'].split('/')[-1] == volume.full_name
]
assert len(remote_volumes) > 0
@v2_only()
def test_up_no_ansi(self):
self.base_dir = 'tests/fixtures/v2-simple'
result = self.dispatch(['--no-ansi', 'up', '-d'], None)
assert "%c[2K\r" % 27 not in result.stderr
assert "%c[1A" % 27 not in result.stderr
assert "%c[1B" % 27 not in result.stderr
@v2_only()
def test_up_with_default_network_config(self):
filename = 'default-network-config.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], None)
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
assert networks[0]['Options']['com.docker.network.bridge.enable_icc'] == 'false'
@v2_only()
def test_up_with_network_aliases(self):
filename = 'network-aliases.yml'
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['-f', filename, 'up', '-d'], None)
back_name = '{}_back'.format(self.project.name)
front_name = '{}_front'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# Two networks were created: back and front
assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
web_container = self.project.get_service('web').containers()[0]
back_aliases = web_container.get(
'NetworkSettings.Networks.{}.Aliases'.format(back_name)
)
assert 'web' in back_aliases
front_aliases = web_container.get(
'NetworkSettings.Networks.{}.Aliases'.format(front_name)
)
assert 'web' in front_aliases
assert 'forward_facing' in front_aliases
assert 'ahead' in front_aliases
@v2_only()
def test_up_with_network_internal(self):
self.require_api_version('1.23')
filename = 'network-internal.yml'
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['-f', filename, 'up', '-d'], None)
internal_net = '{}_internal'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# One network was created: internal
assert sorted(n['Name'].split('/')[-1] for n in networks) == [internal_net]
assert networks[0]['Internal'] is True
@v2_only()
def test_up_with_network_static_addresses(self):
filename = 'network-static-addresses.yml'
ipv4_address = '172.16.100.100'
ipv6_address = 'fe80::1001:100'
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['-f', filename, 'up', '-d'], None)
static_net = '{}_static_test'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
        # One network was created: static_test
assert sorted(n['Name'].split('/')[-1] for n in networks) == [static_net]
web_container = self.project.get_service('web').containers()[0]
ipam_config = web_container.get(
'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net)
)
assert ipv4_address in ipam_config.values()
assert ipv6_address in ipam_config.values()
@v2_only()
def test_up_with_networks(self):
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['up', '-d'], None)
back_name = '{}_back'.format(self.project.name)
front_name = '{}_front'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# Two networks were created: back and front
assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
# lookup by ID instead of name in case of duplicates
back_network = self.client.inspect_network(
[n for n in networks if n['Name'] == back_name][0]['Id']
)
front_network = self.client.inspect_network(
[n for n in networks if n['Name'] == front_name][0]['Id']
)
web_container = self.project.get_service('web').containers()[0]
app_container = self.project.get_service('app').containers()[0]
db_container = self.project.get_service('db').containers()[0]
for net_name in [front_name, back_name]:
links = app_container.get('NetworkSettings.Networks.{}.Links'.format(net_name))
assert '{}:database'.format(db_container.name) in links
# db and app joined the back network
assert sorted(back_network['Containers']) == sorted([db_container.id, app_container.id])
# web and app joined the front network
assert sorted(front_network['Containers']) == sorted([web_container.id, app_container.id])
# web can see app but not db
assert self.lookup(web_container, "app")
assert not self.lookup(web_container, "db")
# app can see db
assert self.lookup(app_container, "db")
# app has aliased db to "database"
assert self.lookup(app_container, "database")
@v2_only()
def test_up_missing_network(self):
self.base_dir = 'tests/fixtures/networks'
result = self.dispatch(
['-f', 'missing-network.yml', 'up', '-d'],
returncode=1)
assert 'Service "web" uses an undefined network "foo"' in result.stderr
@v2_only()
@no_cluster('container networks not supported in Swarm')
def test_up_with_network_mode(self):
c = self.client.create_container(
'busybox', 'top', name='composetest_network_mode_container',
host_config={}
)
self.addCleanup(self.client.remove_container, c, force=True)
self.client.start(c)
container_mode_source = 'container:{}'.format(c['Id'])
filename = 'network-mode.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], None)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert not networks
for name in ['bridge', 'host', 'none']:
container = self.project.get_service(name).containers()[0]
assert list(container.get('NetworkSettings.Networks')) == [name]
assert container.get('HostConfig.NetworkMode') == name
service_mode_source = 'container:{}'.format(
self.project.get_service('bridge').containers()[0].id)
service_mode_container = self.project.get_service('service').containers()[0]
assert not service_mode_container.get('NetworkSettings.Networks')
assert service_mode_container.get('HostConfig.NetworkMode') == service_mode_source
container_mode_container = self.project.get_service('container').containers()[0]
assert not container_mode_container.get('NetworkSettings.Networks')
assert container_mode_container.get('HostConfig.NetworkMode') == container_mode_source
@v2_only()
def test_up_external_networks(self):
filename = 'external-networks.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
assert 'declared as external, but could not be found' in result.stderr
networks = [
n['Name'] for n in self.client.networks()
if n['Name'].startswith('{}_'.format(self.project.name))
]
assert not networks
network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']]
for name in network_names:
self.client.create_network(name, attachable=True)
self.dispatch(['-f', filename, 'up', '-d'])
container = self.project.containers()[0]
assert sorted(list(container.get('NetworkSettings.Networks'))) == sorted(network_names)
@v2_only()
def test_up_with_external_default_network(self):
filename = 'external-default.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
assert 'declared as external, but could not be found' in result.stderr
networks = [
n['Name'] for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert not networks
network_name = 'composetest_external_network'
self.client.create_network(network_name, attachable=True)
self.dispatch(['-f', filename, 'up', '-d'])
container = self.project.containers()[0]
assert list(container.get('NetworkSettings.Networks')) == [network_name]
@v2_1_only()
def test_up_with_network_labels(self):
filename = 'network-label.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
network_with_label = '{}_network_with_label'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert [n['Name'].split('/')[-1] for n in networks] == [network_with_label]
assert 'label_key' in networks[0]['Labels']
assert networks[0]['Labels']['label_key'] == 'label_val'
@v2_1_only()
def test_up_with_volume_labels(self):
filename = 'volume-label.yml'
self.base_dir = 'tests/fixtures/volumes'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
volume_with_label = '{}_volume_with_label'.format(self.project.name)
volumes = [
v for v in self.client.volumes().get('Volumes', [])
if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
assert 'label_key' in volumes[0]['Labels']
assert volumes[0]['Labels']['label_key'] == 'label_val'
@v2_only()
def test_up_no_services(self):
self.base_dir = 'tests/fixtures/no-services'
self.dispatch(['up', '-d'], None)
network_names = [
n['Name'] for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert network_names == []
def test_up_with_links_v1(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'web'], None)
# No network was created
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
assert networks == []
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
# console was not started
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
# web has links
web_container = web.containers()[0]
self.assertTrue(web_container.get('HostConfig.Links'))
def test_up_with_net_is_invalid(self):
self.base_dir = 'tests/fixtures/net-container'
result = self.dispatch(
['-f', 'v2-invalid.yml', 'up', '-d'],
returncode=1)
assert "Unsupported config option for services.bar: 'net'" in result.stderr
@no_cluster("Legacy networking not supported on Swarm")
def test_up_with_net_v1(self):
self.base_dir = 'tests/fixtures/net-container'
self.dispatch(['up', '-d'], None)
bar = self.project.get_service('bar')
bar_container = bar.containers()[0]
foo = self.project.get_service('foo')
foo_container = foo.containers()[0]
assert foo_container.get('HostConfig.NetworkMode') == \
'container:{}'.format(bar_container.id)
@v3_only()
def test_up_with_healthcheck(self):
def wait_on_health_status(container, status):
def condition():
container.inspect()
return container.get('State.Health.Status') == status
return wait_on_condition(condition, delay=0.5)
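        # Docker reports healthcheck intervals and timeouts in nanoseconds, hence
        # the nanoseconds_from_time_seconds conversions in the expected configs.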
self.base_dir = 'tests/fixtures/healthcheck'
self.dispatch(['up', '-d'], None)
passes = self.project.get_service('passes')
passes_container = passes.containers()[0]
assert passes_container.get('Config.Healthcheck') == {
"Test": ["CMD-SHELL", "/bin/true"],
"Interval": nanoseconds_from_time_seconds(1),
"Timeout": nanoseconds_from_time_seconds(30 * 60),
"Retries": 1,
}
wait_on_health_status(passes_container, 'healthy')
fails = self.project.get_service('fails')
fails_container = fails.containers()[0]
assert fails_container.get('Config.Healthcheck') == {
"Test": ["CMD", "/bin/false"],
"Interval": nanoseconds_from_time_seconds(2.5),
"Retries": 2,
}
wait_on_health_status(fails_container, 'unhealthy')
disabled = self.project.get_service('disabled')
disabled_container = disabled.containers()[0]
assert disabled_container.get('Config.Healthcheck') == {
"Test": ["NONE"],
}
assert 'Health' not in disabled_container.get('State')
def test_up_with_no_deps(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', '--no-deps', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 0)
self.assertEqual(len(console.containers()), 0)
def test_up_with_force_recreate(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.dispatch(['up', '-d', '--force-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertNotEqual(old_ids, new_ids)
def test_up_with_no_recreate(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.dispatch(['up', '-d', '--no-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertEqual(old_ids, new_ids)
def test_up_with_force_recreate_and_no_recreate(self):
self.dispatch(
['up', '-d', '--force-recreate', '--no-recreate'],
returncode=1)
def test_up_with_timeout_detached(self):
result = self.dispatch(['up', '-d', '-t', '1'], returncode=1)
assert "-d and --timeout cannot be combined." in result.stderr
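    # The signal-handling tests below start `up` in a separate process, send it a
    # signal, and then wait for the project's containers to be taken down.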
def test_up_handles_sigint(self):
proc = start_process(self.base_dir, ['up', '-t', '2'])
wait_on_condition(ContainerCountCondition(self.project, 2))
os.kill(proc.pid, signal.SIGINT)
wait_on_condition(ContainerCountCondition(self.project, 0))
def test_up_handles_sigterm(self):
proc = start_process(self.base_dir, ['up', '-t', '2'])
wait_on_condition(ContainerCountCondition(self.project, 2))
os.kill(proc.pid, signal.SIGTERM)
wait_on_condition(ContainerCountCondition(self.project, 0))
@v2_only()
def test_up_handles_force_shutdown(self):
self.base_dir = 'tests/fixtures/sleeps-composefile'
proc = start_process(self.base_dir, ['up', '-t', '200'])
wait_on_condition(ContainerCountCondition(self.project, 2))
os.kill(proc.pid, signal.SIGTERM)
time.sleep(0.1)
os.kill(proc.pid, signal.SIGTERM)
wait_on_condition(ContainerCountCondition(self.project, 0))
def test_up_handles_abort_on_container_exit(self):
self.base_dir = 'tests/fixtures/abort-on-container-exit-0'
proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
wait_on_condition(ContainerCountCondition(self.project, 0))
proc.wait()
self.assertEqual(proc.returncode, 0)
def test_up_handles_abort_on_container_exit_code(self):
self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
wait_on_condition(ContainerCountCondition(self.project, 0))
proc.wait()
self.assertEqual(proc.returncode, 1)
@v2_only()
@no_cluster('Container PID mode does not work across clusters')
def test_up_with_pid_mode(self):
c = self.client.create_container(
'busybox', 'top', name='composetest_pid_mode_container',
host_config={}
)
self.addCleanup(self.client.remove_container, c, force=True)
self.client.start(c)
container_mode_source = 'container:{}'.format(c['Id'])
self.base_dir = 'tests/fixtures/pid-mode'
self.dispatch(['up', '-d'], None)
service_mode_source = 'container:{}'.format(
self.project.get_service('container').containers()[0].id)
service_mode_container = self.project.get_service('service').containers()[0]
assert service_mode_container.get('HostConfig.PidMode') == service_mode_source
container_mode_container = self.project.get_service('container').containers()[0]
assert container_mode_container.get('HostConfig.PidMode') == container_mode_source
host_mode_container = self.project.get_service('host').containers()[0]
assert host_mode_container.get('HostConfig.PidMode') == 'host'
def test_exec_without_tty(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'console'])
self.assertEqual(len(self.project.containers()), 1)
stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
self.assertEqual(stderr, "")
self.assertEqual(stdout, "/\n")
def test_exec_custom_user(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'console'])
self.assertEqual(len(self.project.containers()), 1)
stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
self.assertEqual(stdout, "operator\n")
self.assertEqual(stderr, "")
@v2_2_only()
def test_exec_service_with_environment_overridden(self):
name = 'service'
self.base_dir = 'tests/fixtures/environment-exec'
self.dispatch(['up', '-d'])
self.assertEqual(len(self.project.containers()), 1)
stdout, stderr = self.dispatch([
'exec',
'-T',
'-e', 'foo=notbar',
'--env', 'alpha=beta',
name,
'env',
])
# env overridden
assert 'foo=notbar' in stdout
# keep environment from yaml
assert 'hello=world' in stdout
# added option from command line
assert 'alpha=beta' in stdout
self.assertEqual(stderr, '')
def test_run_service_without_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'console', '/bin/true'])
self.assertEqual(len(self.project.containers()), 0)
        # Ensure stdin, stdout and stderr were attached
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
config = container.inspect()['Config']
self.assertTrue(config['AttachStderr'])
self.assertTrue(config['AttachStdout'])
self.assertTrue(config['AttachStdin'])
def test_run_service_with_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
@v2_only()
def test_run_service_with_dependencies(self):
self.base_dir = 'tests/fixtures/v2-dependencies'
self.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
def test_run_service_with_scaled_dependencies(self):
self.base_dir = 'tests/fixtures/v2-dependencies'
self.dispatch(['up', '-d', '--scale', 'db=2', '--scale', 'console=0'])
db = self.project.get_service('db')
console = self.project.get_service('console')
assert len(db.containers()) == 2
assert len(console.containers()) == 0
self.dispatch(['run', 'web', '/bin/true'], None)
assert len(db.containers()) == 2
assert len(console.containers()) == 0
def test_run_with_no_deps(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 0)
def test_run_does_not_recreate_linked_containers(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'db'])
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 1)
old_ids = [c.id for c in db.containers()]
self.dispatch(['run', 'web', '/bin/true'], None)
self.assertEqual(len(db.containers()), 1)
new_ids = [c.id for c in db.containers()]
self.assertEqual(old_ids, new_ids)
def test_run_without_command(self):
self.base_dir = 'tests/fixtures/commands-composefile'
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
self.dispatch(['run', 'implicit'])
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=OneOffFilter.only)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/sh -c echo "success"'],
)
self.dispatch(['run', 'explicit'])
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=OneOffFilter.only)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/true'],
)
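    # `run --rm` should remove the one-off container and its anonymous volumes on
    # exit, while named volumes are kept.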
@pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
def test_run_rm(self):
self.base_dir = 'tests/fixtures/volume'
proc = start_process(self.base_dir, ['run', '--rm', 'test'])
wait_on_condition(ContainerStateCondition(
self.project.client,
'volume_test_run_1',
'running'))
service = self.project.get_service('test')
containers = service.containers(one_off=OneOffFilter.only)
self.assertEqual(len(containers), 1)
mounts = containers[0].get('Mounts')
for mount in mounts:
if mount['Destination'] == '/container-path':
anonymous_name = mount['Name']
break
os.kill(proc.pid, signal.SIGINT)
wait_on_process(proc, 1)
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
volumes = self.client.volumes()['Volumes']
assert volumes is not None
for volume in service.options.get('volumes'):
if volume.internal == '/container-named-path':
name = volume.external
break
volume_names = [v['Name'].split('/')[-1] for v in volumes]
assert name in volume_names
assert anonymous_name not in volume_names
def test_run_service_with_dockerfile_entrypoint(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['printf']
assert container.get('Config.Cmd') == ['default', 'args']
def test_run_service_with_dockerfile_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert not container.get('Config.Cmd')
def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert container.get('Config.Cmd') == ['foo']
def test_run_service_with_compose_file_entrypoint(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['printf']
assert container.get('Config.Cmd') == ['default', 'args']
def test_run_service_with_compose_file_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert not container.get('Config.Cmd')
def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert container.get('Config.Cmd') == ['foo']
def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
self.base_dir = 'tests/fixtures/entrypoint-composefile'
self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
assert container.get('Config.Entrypoint') == ['echo']
assert container.get('Config.Cmd') == ['']
def test_run_service_with_user_overridden(self):
self.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
self.assertEqual(user, container.get('Config.User'))
def test_run_service_with_user_overridden_short_form(self):
self.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
self.dispatch(['run', '-u', user, name], returncode=1)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
self.assertEqual(user, container.get('Config.User'))
def test_run_service_with_environment_overridden(self):
name = 'service'
self.base_dir = 'tests/fixtures/environment-composefile'
self.dispatch([
'run', '-e', 'foo=notbar',
'-e', 'allo=moto=bobo',
'-e', 'alpha=beta',
name,
'/bin/true',
])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
# env overridden
self.assertEqual('notbar', container.environment['foo'])
# keep environment from yaml
self.assertEqual('world', container.environment['hello'])
# added option from command line
self.assertEqual('beta', container.environment['alpha'])
        # make sure a value containing '=' doesn't break parsing
self.assertEqual('moto=bobo', container.environment['allo'])
def test_run_service_without_map_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', 'simple'])
container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_random, None)
self.assertEqual(port_assigned, None)
def test_run_service_with_map_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', '--service-ports', 'simple'])
container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
port_range = container.get_local_port(3002), container.get_local_port(3003)
# close all one off containers we just created
container.stop()
# check the ports
assert port_random is not None
assert port_assigned.endswith(':49152')
assert port_range[0].endswith(':49153')
assert port_range[1].endswith(':49154')
def test_run_service_with_explicitly_mapped_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
# get port information
port_short = container.get_local_port(3000)
port_full = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
assert port_short.endswith(':30000')
assert port_full.endswith(':30001')
def test_run_service_with_explicitly_mapped_ip_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch([
'run', '-d',
'-p', '127.0.0.1:30000:3000',
'--publish', '127.0.0.1:30001:3001',
'simple'
])
container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
# get port information
port_short = container.get_local_port(3000)
port_full = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_short, "127.0.0.1:30000")
self.assertEqual(port_full, "127.0.0.1:30001")
def test_run_with_expose_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/expose-composefile'
self.dispatch(['run', '-d', '--service-ports', 'simple'])
container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
ports = container.ports
self.assertEqual(len(ports), 9)
# exposed ports are not mapped to host ports
assert ports['3000/tcp'] is None
assert ports['3001/tcp'] is None
assert ports['3001/udp'] is None
assert ports['3002/tcp'] is None
assert ports['3003/tcp'] is None
assert ports['3004/tcp'] is None
assert ports['3005/tcp'] is None
assert ports['3006/udp'] is None
assert ports['3007/udp'] is None
# close all one off containers we just created
container.stop()
def test_run_with_custom_name(self):
self.base_dir = 'tests/fixtures/environment-composefile'
name = 'the-container-name'
self.dispatch(['run', '--name', name, 'service', '/bin/true'])
service = self.project.get_service('service')
container, = service.containers(stopped=True, one_off=OneOffFilter.only)
self.assertEqual(container.name, name)
def test_run_service_with_workdir_overridden(self):
self.base_dir = 'tests/fixtures/run-workdir'
name = 'service'
workdir = '/var'
self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name])
service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
self.assertEqual(workdir, container.get('Config.WorkingDir'))
def test_run_service_with_workdir_overridden_short_form(self):
self.base_dir = 'tests/fixtures/run-workdir'
name = 'service'
workdir = '/var'
self.dispatch(['run', '-w', workdir, name])
service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
self.assertEqual(workdir, container.get('Config.WorkingDir'))
@v2_only()
def test_run_interactive_connects_to_network(self):
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['up', '-d'])
self.dispatch(['run', 'app', 'nslookup', 'app'])
self.dispatch(['run', 'app', 'nslookup', 'db'])
containers = self.project.get_service('app').containers(
stopped=True, one_off=OneOffFilter.only)
assert len(containers) == 2
for container in containers:
networks = container.get('NetworkSettings.Networks')
assert sorted(list(networks)) == [
'{}_{}'.format(self.project.name, name)
for name in ['back', 'front']
]
for _, config in networks.items():
# TODO: once we drop support for API <1.24, this can be changed to:
# assert config['Aliases'] == [container.short_id]
aliases = set(config['Aliases'] or []) - set([container.short_id])
assert not aliases
@v2_only()
def test_run_detached_connects_to_network(self):
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['up', '-d'])
self.dispatch(['run', '-d', 'app', 'top'])
container = self.project.get_service('app').containers(one_off=OneOffFilter.only)[0]
networks = container.get('NetworkSettings.Networks')
assert sorted(list(networks)) == [
'{}_{}'.format(self.project.name, name)
for name in ['back', 'front']
]
for _, config in networks.items():
# TODO: once we drop support for API <1.24, this can be changed to:
# assert config['Aliases'] == [container.short_id]
aliases = set(config['Aliases'] or []) - set([container.short_id])
assert not aliases
assert self.lookup(container, 'app')
assert self.lookup(container, 'db')
def test_run_handles_sigint(self):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
'simplecomposefile_simple_run_1',
'running'))
os.kill(proc.pid, signal.SIGINT)
wait_on_condition(ContainerStateCondition(
self.project.client,
'simplecomposefile_simple_run_1',
'exited'))
def test_run_handles_sigterm(self):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
'simplecomposefile_simple_run_1',
'running'))
os.kill(proc.pid, signal.SIGTERM)
wait_on_condition(ContainerStateCondition(
self.project.client,
'simplecomposefile_simple_run_1',
'exited'))
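    # mock.patch.dict(os.environ) snapshots the environment for each test below
    # and restores it afterwards, so variables set here don't leak between tests.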
@mock.patch.dict(os.environ)
def test_run_unicode_env_values_from_system(self):
value = 'ą, ć, ę, ł, ń, ó, ś, ź, ż'
if six.PY2: # os.environ doesn't support unicode values in Py2
os.environ['BAR'] = value.encode('utf-8')
else: # ... and doesn't support byte values in Py3
os.environ['BAR'] = value
self.base_dir = 'tests/fixtures/unicode-environment'
result = self.dispatch(['run', 'simple'])
if six.PY2: # Can't retrieve output on Py3. See issue #3670
assert value == result.stdout.strip()
container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
environment = container.get('Config.Env')
assert 'FOO={}'.format(value) in environment
@mock.patch.dict(os.environ)
def test_run_env_values_from_system(self):
os.environ['FOO'] = 'bar'
os.environ['BAR'] = 'baz'
self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)
container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
environment = container.get('Config.Env')
assert 'FOO=bar' in environment
assert 'BAR=baz' not in environment
def test_run_label_flag(self):
self.base_dir = 'tests/fixtures/run-labels'
name = 'service'
self.dispatch(['run', '-l', 'default', '--label', 'foo=baz', name, '/bin/true'])
service = self.project.get_service(name)
container, = service.containers(stopped=True, one_off=OneOffFilter.only)
labels = container.labels
assert labels['default'] == ''
assert labels['foo'] == 'baz'
assert labels['hello'] == 'world'
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
kill_service(service)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
service = self.project.get_service('simple')
service.create_container()
kill_service(service)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
service = self.project.get_service('simple')
service.create_container()
self.dispatch(['rm', '-fs'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_rm_stop(self):
self.dispatch(['up', '-d'], None)
simple = self.project.get_service('simple')
another = self.project.get_service('another')
assert len(simple.containers()) == 1
assert len(another.containers()) == 1
self.dispatch(['rm', '-fs'], None)
assert len(simple.containers(stopped=True)) == 0
assert len(another.containers(stopped=True)) == 0
self.dispatch(['up', '-d'], None)
assert len(simple.containers()) == 1
assert len(another.containers()) == 1
self.dispatch(['rm', '-fs', 'another'], None)
assert len(simple.containers()) == 1
assert len(another.containers(stopped=True)) == 0
def test_rm_all(self):
service = self.project.get_service('simple')
service.create_container(one_off=False)
service.create_container(one_off=True)
kill_service(service)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
self.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
service.create_container(one_off=False)
service.create_container(one_off=True)
kill_service(service)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
self.dispatch(['rm', '-f', '--all'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
def test_stop(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.dispatch(['stop', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_stop_signal(self):
self.base_dir = 'tests/fixtures/stop-signal-composefile'
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.dispatch(['stop', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
self.assertEqual(service.containers(stopped=True)[0].exit_code, 0)
def test_start_no_containers(self):
result = self.dispatch(['start'], returncode=1)
assert 'No containers to start' in result.stderr
@v2_only()
def test_up_logging(self):
self.base_dir = 'tests/fixtures/logging-composefile'
self.dispatch(['up', '-d'])
simple = self.project.get_service('simple').containers()[0]
log_config = simple.get('HostConfig.LogConfig')
self.assertTrue(log_config)
self.assertEqual(log_config.get('Type'), 'none')
another = self.project.get_service('another').containers()[0]
log_config = another.get('HostConfig.LogConfig')
self.assertTrue(log_config)
self.assertEqual(log_config.get('Type'), 'json-file')
self.assertEqual(log_config.get('Config')['max-size'], '10m')
def test_up_logging_legacy(self):
self.base_dir = 'tests/fixtures/logging-composefile-legacy'
self.dispatch(['up', '-d'])
simple = self.project.get_service('simple').containers()[0]
log_config = simple.get('HostConfig.LogConfig')
self.assertTrue(log_config)
self.assertEqual(log_config.get('Type'), 'none')
another = self.project.get_service('another').containers()[0]
log_config = another.get('HostConfig.LogConfig')
self.assertTrue(log_config)
self.assertEqual(log_config.get('Type'), 'json-file')
self.assertEqual(log_config.get('Config')['max-size'], '10m')
def test_pause_unpause(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertFalse(service.containers()[0].is_paused)
self.dispatch(['pause'], None)
self.assertTrue(service.containers()[0].is_paused)
self.dispatch(['unpause'], None)
self.assertFalse(service.containers()[0].is_paused)
def test_pause_no_containers(self):
result = self.dispatch(['pause'], returncode=1)
assert 'No containers to pause' in result.stderr
def test_unpause_no_containers(self):
result = self.dispatch(['unpause'], returncode=1)
assert 'No containers to unpause' in result.stderr
def test_logs_invalid_service_name(self):
self.dispatch(['logs', 'madeupname'], returncode=1)
def test_logs_follow(self):
self.base_dir = 'tests/fixtures/echo-services'
self.dispatch(['up', '-d'])
result = self.dispatch(['logs', '-f'])
if not is_cluster(self.client):
assert result.stdout.count('\n') == 5
else:
# Sometimes logs are picked up from old containers that haven't yet
# been removed (removal in Swarm is async)
assert result.stdout.count('\n') >= 5
assert 'simple' in result.stdout
assert 'another' in result.stdout
assert 'exited with code 0' in result.stdout
def test_logs_follow_logs_from_new_containers(self):
self.base_dir = 'tests/fixtures/logs-composefile'
self.dispatch(['up', '-d', 'simple'])
proc = start_process(self.base_dir, ['logs', '-f'])
self.dispatch(['up', '-d', 'another'])
wait_on_condition(ContainerStateCondition(
self.project.client,
'logscomposefile_another_1',
'exited'))
self.dispatch(['kill', 'simple'])
result = wait_on_process(proc)
assert 'hello' in result.stdout
assert 'test' in result.stdout
assert 'logscomposefile_another_1 exited with code 0' in result.stdout
assert 'logscomposefile_simple_1 exited with code 137' in result.stdout
def test_logs_default(self):
self.base_dir = 'tests/fixtures/logs-composefile'
self.dispatch(['up', '-d'])
result = self.dispatch(['logs'])
assert 'hello' in result.stdout
assert 'test' in result.stdout
assert 'exited with' not in result.stdout
def test_logs_on_stopped_containers_exits(self):
self.base_dir = 'tests/fixtures/echo-services'
self.dispatch(['up'])
result = self.dispatch(['logs'])
assert 'simple' in result.stdout
assert 'another' in result.stdout
assert 'exited with' not in result.stdout
def test_logs_timestamps(self):
self.base_dir = 'tests/fixtures/echo-services'
self.dispatch(['up', '-d'])
result = self.dispatch(['logs', '-f', '-t'])
        self.assertRegexpMatches(result.stdout, r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})')
def test_logs_tail(self):
self.base_dir = 'tests/fixtures/logs-tail-composefile'
self.dispatch(['up'])
result = self.dispatch(['logs', '--tail', '2'])
assert 'c\n' in result.stdout
assert 'd\n' in result.stdout
assert 'a\n' not in result.stdout
assert 'b\n' not in result.stdout
def test_kill(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.dispatch(['kill'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigstop(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertEqual(len(service.containers()), 1)
# The container is still running. It has only been paused
self.assertTrue(service.containers()[0].is_running)
def test_kill_stopped_service(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertTrue(service.containers()[0].is_running)
self.dispatch(['kill', '-s', 'SIGKILL'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_restart(self):
service = self.project.get_service('simple')
container = service.create_container()
service.start_container(container)
started_at = container.dictionary['State']['StartedAt']
self.dispatch(['restart', '-t', '1'], None)
container.inspect()
self.assertNotEqual(
container.dictionary['State']['FinishedAt'],
'0001-01-01T00:00:00Z',
)
self.assertNotEqual(
container.dictionary['State']['StartedAt'],
started_at,
)
def test_restart_stopped_container(self):
service = self.project.get_service('simple')
container = service.create_container()
container.start()
container.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.dispatch(['restart', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=False)), 1)
def test_restart_no_containers(self):
result = self.dispatch(['restart'], returncode=1)
assert 'No containers to restart' in result.stderr
def test_scale(self):
project = self.project
self.dispatch(['scale', 'simple=1'])
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.dispatch(['scale', 'simple=3', 'another=2'])
self.assertEqual(len(project.get_service('simple').containers()), 3)
self.assertEqual(len(project.get_service('another').containers()), 2)
self.dispatch(['scale', 'simple=1', 'another=1'])
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.dispatch(['scale', 'simple=1', 'another=1'])
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.dispatch(['scale', 'simple=0', 'another=0'])
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_scale_v2_2(self):
self.base_dir = 'tests/fixtures/scale'
result = self.dispatch(['scale', 'web=1'], returncode=1)
assert 'incompatible with the v2.2 format' in result.stderr
def test_up_scale_scale_up(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
self.dispatch(['up', '-d', '--scale', 'web=3'])
assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 1
def test_up_scale_scale_down(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
self.dispatch(['up', '-d', '--scale', 'web=1'])
assert len(project.get_service('web').containers()) == 1
assert len(project.get_service('db').containers()) == 1
def test_up_scale_reset(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 3
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
def test_up_scale_to_zero(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
assert len(project.get_service('web').containers()) == 0
assert len(project.get_service('db').containers()) == 0
def test_port(self):
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['up', '-d'], None)
container = self.project.get_service('simple').get_container()
def get_port(number):
result = self.dispatch(['port', 'simple', str(number)])
return result.stdout.rstrip()
assert get_port(3000) == container.get_local_port(3000)
assert ':49152' in get_port(3001)
assert ':49153' in get_port(3002)
def test_expanded_port(self):
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['-f', 'expanded-notation.yml', 'up', '-d'])
container = self.project.get_service('simple').get_container()
def get_port(number):
result = self.dispatch(['port', 'simple', str(number)])
return result.stdout.rstrip()
assert get_port(3000) == container.get_local_port(3000)
assert ':53222' in get_port(3001)
assert ':53223' in get_port(3002)
def test_port_with_scale(self):
self.base_dir = 'tests/fixtures/ports-composefile-scale'
self.dispatch(['scale', 'simple=2'], None)
containers = sorted(
self.project.containers(service_names=['simple']),
key=attrgetter('name'))
def get_port(number, index=None):
if index is None:
result = self.dispatch(['port', 'simple', str(number)])
else:
result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
return result.stdout.rstrip()
self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
self.assertEqual(get_port(3002), "")
def test_events_json(self):
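        # Flow: start `events --json` in the background, bring the project up,
        # wait until two containers exist, SIGINT the events process, then check
        # that the emitted JSON lines report two 'create' and two 'start' actions.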
events_proc = start_process(self.base_dir, ['events', '--json'])
self.dispatch(['up', '-d'])
wait_on_condition(ContainerCountCondition(self.project, 2))
os.kill(events_proc.pid, signal.SIGINT)
result = wait_on_process(events_proc, returncode=1)
lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')]
assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2}
def test_events_human_readable(self):
def has_timestamp(string):
str_iso_date, str_iso_time, container_info = string.split(' ', 2)
try:
return isinstance(datetime.datetime.strptime(
'%s %s' % (str_iso_date, str_iso_time),
'%Y-%m-%d %H:%M:%S.%f'),
datetime.datetime)
except ValueError:
return False
events_proc = start_process(self.base_dir, ['events'])
self.dispatch(['up', '-d', 'simple'])
wait_on_condition(ContainerCountCondition(self.project, 1))
os.kill(events_proc.pid, signal.SIGINT)
result = wait_on_process(events_proc, returncode=1)
lines = result.stdout.rstrip().split('\n')
assert len(lines) == 2
container, = self.project.containers()
expected_template = ' container {} {}'
expected_meta_info = ['image=busybox:latest', 'name=simplecomposefile_simple_1']
assert expected_template.format('create', container.id) in lines[0]
assert expected_template.format('start', container.id) in lines[1]
for line in lines:
for info in expected_meta_info:
assert info in line
assert has_timestamp(lines[0])
def test_env_file_relative_to_compose_file(self):
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
self.dispatch(['-f', config_path, 'up', '-d'], None)
self._project = get_project(self.base_dir, [config_path])
containers = self.project.containers(stopped=True)
self.assertEqual(len(containers), 1)
self.assertIn("FOO=1", containers[0].get('Config.Env'))
@mock.patch.dict(os.environ)
def test_home_and_env_var_in_volume_path(self):
os.environ['VOLUME_NAME'] = 'my-volume'
os.environ['HOME'] = '/tmp/home-dir'
self.base_dir = 'tests/fixtures/volume-path-interpolation'
self.dispatch(['up', '-d'], None)
container = self.project.containers(stopped=True)[0]
actual_host_path = container.get_mount('/container-path')['Source']
components = actual_host_path.split('/')
assert components[-2:] == ['home-dir', 'my-volume']
def test_up_with_default_override_file(self):
self.base_dir = 'tests/fixtures/override-files'
self.dispatch(['up', '-d'], None)
containers = self.project.containers()
self.assertEqual(len(containers), 2)
web, db = containers
self.assertEqual(web.human_readable_command, 'top')
self.assertEqual(db.human_readable_command, 'top')
def test_up_with_multiple_files(self):
self.base_dir = 'tests/fixtures/override-files'
config_paths = [
'docker-compose.yml',
'docker-compose.override.yml',
'extra.yml',
]
self._project = get_project(self.base_dir, config_paths)
self.dispatch(
[
'-f', config_paths[0],
'-f', config_paths[1],
'-f', config_paths[2],
'up', '-d',
],
None)
containers = self.project.containers()
self.assertEqual(len(containers), 3)
web, other, db = containers
self.assertEqual(web.human_readable_command, 'top')
self.assertEqual(db.human_readable_command, 'top')
self.assertEqual(other.human_readable_command, 'top')
def test_up_with_extends(self):
self.base_dir = 'tests/fixtures/extends'
self.dispatch(['up', '-d'], None)
self.assertEqual(
set([s.name for s in self.project.services]),
set(['mydb', 'myweb']),
)
# Sort by name so we get [db, web]
containers = sorted(
self.project.containers(stopped=True),
key=lambda c: c.name,
)
self.assertEqual(len(containers), 2)
web = containers[1]
self.assertEqual(
set(get_links(web)),
set(['db', 'mydb_1', 'extends_mydb_1']))
expected_env = set([
"FOO=1",
"BAR=2",
"BAZ=2",
])
self.assertTrue(expected_env <= set(web.get('Config.Env')))
def test_top_services_not_running(self):
self.base_dir = 'tests/fixtures/top'
result = self.dispatch(['top'])
assert len(result.stdout) == 0
def test_top_services_running(self):
self.base_dir = 'tests/fixtures/top'
self.dispatch(['up', '-d'])
result = self.dispatch(['top'])
self.assertIn('top_service_a', result.stdout)
self.assertIn('top_service_b', result.stdout)
self.assertNotIn('top_not_a_service', result.stdout)
def test_top_processes_running(self):
self.base_dir = 'tests/fixtures/top'
self.dispatch(['up', '-d'])
result = self.dispatch(['top'])
assert result.stdout.count("top") == 4
def test_forward_exitval(self):
self.base_dir = 'tests/fixtures/exit-code-from'
proc = start_process(
self.base_dir,
['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
result = wait_on_process(proc, returncode=1)
assert 'exitcodefrom_another_1 exited with code 1' in result.stdout
def test_images(self):
self.project.get_service('simple').create_container()
result = self.dispatch(['images'])
assert 'busybox' in result.stdout
assert 'simplecomposefile_simple_1' in result.stdout
def test_images_default_composefile(self):
self.base_dir = 'tests/fixtures/multiple-composefiles'
self.dispatch(['up', '-d'])
result = self.dispatch(['images'])
assert 'busybox' in result.stdout
assert 'multiplecomposefiles_another_1' in result.stdout
assert 'multiplecomposefiles_simple_1' in result.stdout
def test_up_with_override_yaml(self):
self.base_dir = 'tests/fixtures/override-yaml-files'
self._project = get_project(self.base_dir, [])
self.dispatch(
[
'up', '-d',
],
None)
containers = self.project.containers()
self.assertEqual(len(containers), 2)
web, db = containers
self.assertEqual(web.human_readable_command, 'sleep 100')
self.assertEqual(db.human_readable_command, 'top')
def test_up_with_duplicate_override_yaml_files(self):
self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
with self.assertRaises(DuplicateOverrideFileFound):
get_project(self.base_dir, [])
self.base_dir = None
| apache-2.0 | -442,494,116,897,954,300 | 38.168826 | 105 | 0.58561 | false |
gautam1858/tensorflow | tensorflow/lite/tutorials/mnist_tflite.py | 13 | 2850 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
'The path to the TFLite flatbuffer model file.')
flags = flags.FLAGS
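# Example invocation (paths below are placeholders, not defaults):
#   python mnist_tflite.py --data_dir=/tmp/data_dir --model_file=/tmp/mnist.tflite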
def test_image_generator():
# Generates an iterator over images
with tf.Session() as sess:
input_data = tf.compat.v1.data.make_one_shot_iterator(dataset.test(
flags.data_dir)).get_next()
try:
while True:
yield sess.run(input_data)
except tf.errors.OutOfRangeError:
pass
def run_eval(interpreter, input_image):
"""Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
output: output tensor of model being executed.
"""
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on the input images.
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
def main(_):
interpreter = tf.lite.Interpreter(model_path=flags.model_file)
interpreter.allocate_tensors()
num_correct, total = 0, 0
for input_data in test_image_generator():
output = run_eval(interpreter, input_data[0])
total += 1
if output == input_data[1]:
num_correct += 1
if total % 500 == 0:
print('Accuracy after %i images: %f' %
(total, float(num_correct) / float(total)))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
| apache-2.0 | 5,664,178,823,510,447,000 | 31.758621 | 80 | 0.680351 | false |
zephyrdeveloper/zapi-cloud | generator/python/CycleTest.py | 1 | 2143 | import json
import jwt
import time
import hashlib
import requests
def is_json(data):
try:
json.loads(data)
except ValueError:
return False
return True
# ACCOUNT ID
ACCOUNT_ID = '123456:1234abcd-1234-abcd-1234-1234abcd1234'
# ACCESS KEY from navigation >> Tests >> API Keys
ACCESS_KEY = 'amlyYTplN2UyNjFjNC02MTM4LTRiZWEtYWRiNy1lYmMyMjk0ZmZlMGUgYWRtaW4'
# ACCESS KEY from navigation >> Tests >> API Keys
SECRET_KEY = '01wOEb-ZpIruI_QoAPSHirjBXsZRA3LDuLpnw0OHP-8'
# JWT EXPIRE how long token been to be active? 3600 == 1 hour
JWT_EXPIRE = 3600
# BASE URL for Zephyr for Jira Cloud
BASE_URL = 'http://localhost:9000'
# RELATIVE PATH for token generation and make request to api
RELATIVE_PATH = '/public/rest/api/1.0/cycle'
# CANONICAL PATH (Http Method & Relative Path & Query String)
CANONICAL_PATH = 'POST&'+RELATIVE_PATH+'&'
# TOKEN PAYLOAD: claims used to generate the jwt token
payload_token = {
'sub': ACCOUNT_ID,
'qsh': hashlib.sha256(CANONICAL_PATH.encode('utf-8')).hexdigest(),
'iss': ACCESS_KEY,
'exp': int(time.time())+JWT_EXPIRE,
'iat': int(time.time())
}
# GENERATE TOKEN
token = jwt.encode(payload_token, SECRET_KEY, algorithm='HS256').strip().decode('utf-8')
# REQUEST HEADER: to authenticate and authorize api
headers = {
'Authorization': 'JWT '+token,
'Content-Type': 'application/json',
'zapiAccessKey': ACCESS_KEY
}
# REQUEST HEADER: to create cycle
headers = {
'Authorization': 'JWT '+token,
'Content-Type': 'application/json',
'zapiAccessKey': ACCESS_KEY
}
# REQUEST PAYLOAD: to create cycle
cycle = {
'name': 'Sample Cycle',
'projectId': 10000,
'versionId': -1
}
# MAKE REQUEST:
raw_result = requests.post(BASE_URL + RELATIVE_PATH, headers=headers, json=cycle)
if is_json(raw_result.text):
# JSON RESPONSE: convert response to JSON
json_result = json.loads(raw_result.text)
# PRINT RESPONSE: pretty print with 4 indent
print(json.dumps(json_result, indent=4, sort_keys=True))
else:
print(raw_result.text)
| apache-2.0 | 944,096,804,246,844,400 | 25.134146 | 88 | 0.658889 | false |
Hearen/OnceServer | pool_management/bn-xend-core/xend/BNHostAPI.py | 1 | 111013 | import traceback
import inspect
import os
import Queue
import string
import sys
import time
import xmlrpclib
import socket
import struct
import threading
import XendDomain, XendDomainInfo, XendNode, XendDmesg, XendConfig
import XendLogging, XendTaskManager, XendAPIStore
from xen.xend.BNPoolAPI import BNPoolAPI
from xen.util.xmlrpcclient import ServerProxy
from xen.xend import uuid as genuuid
from XendLogging import log
from XendError import *
from xen.util import ip as getip
from xen.util import Netctl
from xen.util import LicenseUtil
from xen.xend.XendCPUPool import XendCPUPool
from XendAuthSessions import instance as auth_manager
from xen.util.xmlrpclib2 import stringify
try:
set
except NameError:
from sets import Set as set
reload(sys)
sys.setdefaultencoding( "utf-8" )
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
argcounts = {}
def _get_XendAPI_instance():
import XendAPI
return XendAPI.instance()
# ------------------------------------------
# Utility Methods for Xen API Implementation
# ------------------------------------------
def xen_api_success(value):
"""Wraps a return value in XenAPI format."""
if value is None:
s = ''
else:
s = stringify(value)
return {"Status": "Success", "Value": s}
def xen_api_success_void():
"""Return success, but caller expects no return value."""
return xen_api_success("")
def xen_api_error(error):
"""Wraps an error value in XenAPI format."""
if type(error) == tuple:
error = list(error)
if type(error) != list:
error = [error]
if len(error) == 0:
error = ['INTERNAL_ERROR', 'Empty list given to xen_api_error']
return { "Status": "Failure",
"ErrorDescription": [str(x) for x in error] }
def xen_rpc_call(ip, method, *args):
"""wrap rpc call to a remote host"""
try:
if not ip:
return xen_api_error("Invalid ip for rpc call")
# create
proxy = ServerProxy("http://" + ip + ":9363/")
# login
response = proxy.session.login('root')
if cmp(response['Status'], 'Failure') == 0:
log.exception(response['ErrorDescription'])
return xen_api_error(response['ErrorDescription'])
session_ref = response['Value']
        # execute
method_parts = method.split('_')
method_class = method_parts[0]
method_name = '_'.join(method_parts[1:])
if method.find("host_metrics") == 0:
method_class = "host_metrics"
method_name = '_'.join(method_parts[2:])
#log.debug(method_class)
#log.debug(method_name)
if method_class.find("Async") == 0:
method_class = method_class.split(".")[1]
response = proxy.__getattr__("Async").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
else:
response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)
if cmp(response['Status'], 'Failure') == 0:
log.exception(response['ErrorDescription'])
return xen_api_error(response['ErrorDescription'])
# result
return response
except socket.error:
return xen_api_error('socket error')
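# Usage sketch (the address is an assumed example): forward an API call to a
# pool member, e.g. xen_rpc_call('192.168.1.10', 'host_get_name_label', host_ref);
# names prefixed with 'Async.' are dispatched through the peer's Async namespace.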
def xen_api_todo():
"""Temporary method to make sure we track down all the TODOs"""
return {"Status": "Error", "ErrorDescription": XEND_ERROR_TODO}
def now():
return datetime()
def datetime(when = None):
"""Marshall the given time as a Xen-API DateTime.
@param when The time in question, given as seconds since the epoch, UTC.
May be None, in which case the current time is used.
"""
if when is None:
return xmlrpclib.DateTime(time.gmtime())
else:
return xmlrpclib.DateTime(time.gmtime(when))
# ---------------------------------------------------
# Event dispatch
# ---------------------------------------------------
EVENT_QUEUE_LENGTH = 50
event_registrations = {}
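# Each entry maps a session reference to its registration state:
#   'classes' - set of class names the session listens to,
#   'queue'   - bounded Queue (EVENT_QUEUE_LENGTH) of pending events,
#   'next-id' - id to assign to the next event dispatched to this session.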
def event_register(session, reg_classes):
if session not in event_registrations:
event_registrations[session] = {
'classes' : set(),
'queue' : Queue.Queue(EVENT_QUEUE_LENGTH),
'next-id' : 1
}
if not reg_classes:
reg_classes = classes
sessionclasses = event_registrations[session]['classes']
if hasattr(sessionclasses, 'union_update'):
sessionclasses.union_update(reg_classes)
else:
sessionclasses.update(reg_classes)
def event_unregister(session, unreg_classes):
if session not in event_registrations:
return
if unreg_classes:
event_registrations[session]['classes'].intersection_update(
unreg_classes)
if len(event_registrations[session]['classes']) == 0:
del event_registrations[session]
else:
del event_registrations[session]
def event_next(session):
if session not in event_registrations:
return xen_api_error(['SESSION_NOT_REGISTERED', session])
queue = event_registrations[session]['queue']
events = [queue.get()]
try:
while True:
events.append(queue.get(False))
except Queue.Empty:
pass
return xen_api_success(events)
def _ctor_event_dispatch(xenapi, ctor, api_cls, session, args):
result = ctor(xenapi, session, *args)
if result['Status'] == 'Success':
ref = result['Value']
event_dispatch('add', api_cls, ref, '')
return result
def _dtor_event_dispatch(xenapi, dtor, api_cls, session, ref, args):
result = dtor(xenapi, session, ref, *args)
if result['Status'] == 'Success':
event_dispatch('del', api_cls, ref, '')
return result
def _setter_event_dispatch(xenapi, setter, api_cls, attr_name, session, ref,
args):
result = setter(xenapi, session, ref, *args)
if result['Status'] == 'Success':
event_dispatch('mod', api_cls, ref, attr_name)
return result
def event_dispatch(operation, api_cls, ref, attr_name):
assert operation in ['add', 'del', 'mod']
event = {
'timestamp' : now(),
'class' : api_cls,
'operation' : operation,
'ref' : ref,
'obj_uuid' : ref,
'field' : attr_name,
}
for reg in event_registrations.values():
if api_cls in reg['classes']:
event['id'] = reg['next-id']
reg['next-id'] += 1
reg['queue'].put(event)
# ---------------------------------------------------
# Python Method Decorators for input value validation
# ---------------------------------------------------
def trace(func, api_name=''):
"""Decorator to trace XMLRPC Xen API methods.
@param func: function with any parameters
@param api_name: name of the api call for debugging.
"""
if hasattr(func, 'api'):
api_name = func.api
def trace_func(self, *args, **kwargs):
log.debug('%s: %s' % (api_name, args))
return func(self, *args, **kwargs)
trace_func.api = api_name
return trace_func
def catch_typeerror(func):
"""Decorator to catch any TypeErrors and translate them into Xen-API
errors.
@param func: function with params: (self, ...)
@rtype: callable object
"""
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except TypeError, exn:
#log.exception('catch_typeerror')
if hasattr(func, 'api') and func.api in argcounts:
# Assume that if the argument count was wrong and if the
# exception was thrown inside this file, then it is due to an
# invalid call from the client, otherwise it's an internal
# error (which will be handled further up).
expected = argcounts[func.api]
actual = len(args) + len(kwargs)
if expected != actual:
tb = sys.exc_info()[2]
try:
sourcefile = traceback.extract_tb(tb)[-1][0]
if sourcefile == inspect.getsourcefile(BNHostAPI):
return xen_api_error(
['MESSAGE_PARAMETER_COUNT_MISMATCH',
func.api, expected, actual])
finally:
del tb
raise
except XendAPIError, exn:
return xen_api_error(exn.get_api_error())
return f
def session_required(func):
"""Decorator to verify if session is valid before calling method.
@param func: function with params: (self, session, ...)
@rtype: callable object
"""
def check_session(self, session, *args, **kwargs):
if auth_manager().is_session_valid(session) or cmp(session, "SessionForTest") == 0:
return func(self, session, *args, **kwargs)
else:
return xen_api_error(['SESSION_INVALID', session])
return check_session
def _is_valid_ref(ref, validator):
return type(ref) == str and validator(ref)
def _check_ref(validator, clas, func, api, session, ref, *args, **kwargs):
if _is_valid_ref(ref, validator):
return func(api, session, ref, *args, **kwargs)
else:
return xen_api_error(['HANDLE_INVALID', clas, ref])
def _check_host(validator, clas, func, api, session, ref, *args, **kwargs):
#if BNPoolAPI._uuid == ref:
return func(api, session, ref, *args, **kwargs)
#else:
return xen_api_error(['HANDLE_INVALID', clas, ref])
def valid_host(func):
"""Decorator to verify if host_ref is valid before calling method.
@param func: function with params: (self, session, host_ref, ...)
@rtype: callable object
"""
return lambda * args, **kwargs: \
_check_host(None,
'host', func, *args, **kwargs)
classes = {
'host' : valid_host,
}
def singleton(cls, *args, **kw):
instances = {}
def _singleton(*args, **kw):
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
@singleton
class BNHostAPI(object):
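    # Singleton XML-RPC API class for host-level operations; each call is either
    # handled by the local XendNode or forwarded to the owning host via xen_rpc_call.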
__decorated__ = False
__init_lock__ = threading.Lock()
__network_lock__ = threading.Lock()
def __new__(cls, *args, **kwds):
""" Override __new__ to decorate the class only once.
Lock to make sure the classes are not decorated twice.
"""
cls.__init_lock__.acquire()
try:
if not cls.__decorated__:
cls._decorate()
cls.__decorated__ = True
return object.__new__(cls, *args, **kwds)
finally:
cls.__init_lock__.release()
def _decorate(cls):
""" Decorate all the object methods to have validators
and appropriate function attributes.
This should only be executed once for the duration of the
server.
"""
global_validators = [session_required, catch_typeerror]
# Cheat methods _hosts_name_label
# -------------
# Methods that have a trivial implementation for all classes.
# 1. get_by_uuid == getting by ref, so just return uuid for
# all get_by_uuid() methods.
for api_cls in classes.keys():
# We'll let the autoplug classes implement these functions
# themselves - its much cleaner to do it in the base class
get_by_uuid = '%s_get_by_uuid' % api_cls
get_uuid = '%s_get_uuid' % api_cls
get_all_records = '%s_get_all_records' % api_cls
def _get_by_uuid(_1, _2, ref):
return xen_api_success(ref)
def _get_uuid(_1, _2, ref):
return xen_api_success(ref)
def unpack(v):
return v.get('Value')
def _get_all_records(_api_cls):
return lambda s, session: \
xen_api_success(dict([(ref, unpack(getattr(cls, '%s_get_record' % _api_cls)(s, session, ref)))\
for ref in unpack(getattr(cls, '%s_get_all' % _api_cls)(s, session))]))
setattr(cls, get_by_uuid, _get_by_uuid)
setattr(cls, get_uuid, _get_uuid)
setattr(cls, get_all_records, _get_all_records(api_cls))
# Autoplugging classes
# --------------------
# These have all of their methods grabbed out from the implementation
# class, and wrapped up to be compatible with the Xen-API.
def getter(ref, type):
return XendAPIStore.get(ref, type)
def wrap_method(name, new_f):
try:
f = getattr(cls, name)
wrapped_f = (lambda * args: new_f(f, *args))
wrapped_f.api = f.api
wrapped_f.async = f.async
setattr(cls, name, wrapped_f)
except AttributeError:
# Logged below (API call: %s not found)
pass
def setter_event_wrapper(api_cls, attr_name):
setter_name = '%s_set_%s' % (api_cls, attr_name)
wrap_method(
setter_name,
lambda setter, s, session, ref, *args:
_setter_event_dispatch(s, setter, api_cls, attr_name,
session, ref, args))
def ctor_event_wrapper(api_cls):
ctor_name = '%s_create' % api_cls
wrap_method(
ctor_name,
lambda ctor, s, session, *args:
_ctor_event_dispatch(s, ctor, api_cls, session, args))
def dtor_event_wrapper(api_cls):
dtor_name = '%s_destroy' % api_cls
wrap_method(
dtor_name,
lambda dtor, s, session, ref, *args:
_dtor_event_dispatch(s, dtor, api_cls, session, ref, args))
# Wrapping validators around XMLRPC calls
# ---------------------------------------
for api_cls, validator in classes.items():
def doit(n, takes_instance, async_support=False,
return_type=None):
n_ = n.replace('.', '_')
try:
f = getattr(cls, n_)
if n not in argcounts:
argcounts[n] = f.func_code.co_argcount - 1
validators = takes_instance and validator and \
[validator] or []
validators += global_validators
for v in validators:
f = v(f)
f.api = n
f.async = async_support
if return_type:
f.return_type = return_type
setattr(cls, n_, f)
except AttributeError:
log.warn("API call: %s not found" % n)
ro_attrs = getattr(cls, '%s_attr_ro' % api_cls, []) \
+ cls.Base_attr_ro
rw_attrs = getattr(cls, '%s_attr_rw' % api_cls, []) \
+ cls.Base_attr_rw
methods = getattr(cls, '%s_methods' % api_cls, []) \
+ cls.Base_methods
funcs = getattr(cls, '%s_funcs' % api_cls, []) \
+ cls.Base_funcs
# wrap validators around readable class attributes
for attr_name in ro_attrs + rw_attrs:
doit('%s.get_%s' % (api_cls, attr_name), True,
async_support=False)
            # wrap validators around writable class attributes
for attr_name in rw_attrs:
doit('%s.set_%s' % (api_cls, attr_name), True,
async_support=False)
setter_event_wrapper(api_cls, attr_name)
# wrap validators around methods
for method_name, return_type in methods:
doit('%s.%s' % (api_cls, method_name), True,
async_support=True)
# wrap validators around class functions
for func_name, return_type in funcs:
doit('%s.%s' % (api_cls, func_name), False,
async_support=True,
return_type=return_type)
ctor_event_wrapper(api_cls)
dtor_event_wrapper(api_cls)
_decorate = classmethod(_decorate)
def __init__(self, auth):
self.auth = auth
def host_init_structs(self):
'''
@author: wuyuewen
        @summary: Init Host structs at Xend start, then sync with other Hosts in Pool. Contains SRs and VMs info.
@return: host_structs.
@rtype: dict.
'''
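        # Shape of the returned struct (one entry for this host, keyed by its uuid):
        #   { host_ref: { 'ip', 'name_label', 'isOn', 'host_metrics',
        #                 'SRs': [...], 'VDIs': [...],
        #                 'VMs': { vm_uuid: { 'consoles': [...] } } } }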
_host_structs = {}
host_ref = XendNode.instance().uuid
_host_structs[host_ref] = {}
_host_structs[host_ref]['ip'] = getip.get_current_ipaddr()#host_ip
_host_structs[host_ref]['name_label'] = XendNode.instance().name
_host_structs[host_ref]['isOn'] = True
_host_structs[host_ref]['SRs'] = {}
_host_structs[host_ref]['VMs'] = {}
_host_structs[host_ref]['host_metrics'] = XendNode.instance().host_metrics_uuid
# srs and vdis
_host_structs[host_ref]['SRs'] = XendNode.instance().get_all_sr_uuid()
#log.debug('----------> sr vdis: ')
#log.debug(XendNode.instance().srs.values())
vdis = [sr.get_vdis() for sr in XendNode.instance().srs.values()]
_host_structs[host_ref]['VDIs'] = reduce(lambda x, y: x + y, vdis)
# vms and consoles
for d in XendDomain.instance().list('all'):
vm_uuid = d.get_uuid()
if cmp(vm_uuid, DOM0_UUID) == 0:
continue
dom = XendDomain.instance().get_vm_by_uuid(vm_uuid)
_host_structs[host_ref]['VMs'][vm_uuid] = {}
_host_structs[host_ref]['VMs'][vm_uuid]['consoles'] = []
for console in dom.get_consoles():
_host_structs[host_ref]['VMs'][vm_uuid]['consoles'].append(console)
return _host_structs
Base_attr_ro = ['uuid']
Base_attr_rw = ['name_label', 'name_description']
Base_methods = [('get_record', 'Struct')]
Base_funcs = [('get_all', 'Set'), ('get_by_uuid', None), ('get_all_records', 'Set')]
# Xen API: Class host
# ----------------------------------------------------------------
host_attr_ro = ['software_version',
'resident_VMs',
'PBDs',
'PIFs',
'PPCIs',
'PSCSIs',
'PSCSI_HBAs',
'host_CPUs',
'host_CPU_record',
'cpu_configuration',
'metrics',
'capabilities',
'supported_bootloaders',
'sched_policy',
'API_version_major',
'API_version_minor',
'API_version_vendor',
'API_version_vendor_implementation',
'enabled',
'resident_cpu_pools',
'address',
'all_fibers',
'all_usb_scsi',
'avail_fibers',
'avail_usb_scsi',
'bridges',
'interfaces',
'zpool_can_import',
'vm_sr_record',
'memory_manufacturer',]
host_attr_rw = ['name_label',
'name_description',
'other_config',
'logging',
'in_pool',
'is_Master',
'is_Backup',
'SRs',
'ha']
host_methods = [('disable', None),
('enable', None),
('reboot', None),
('shutdown', None),
('add_to_other_config', None),
('remove_from_other_config', None),
('dmesg', 'String'),
('dmesg_clear', 'String'),
('get_log', 'String'),
('send_debug_keys', None),
('tmem_thaw', None),
('tmem_freeze', None),
('tmem_flush', None),
('tmem_destroy', None),
('tmem_list', None),
('tmem_set_weight', None),
('tmem_set_cap', None),
('tmem_set_compress', None),
('tmem_query_freeable_mb', None),
('tmem_shared_auth', None),
('add_host', None),
('copy', None),
('import_zpool', None),
('export_zpool', None),
('gen_license', 'String'),
('verify_license', bool),
('enable_vxlan', bool),
('disable_vxlan', bool),
]
host_funcs = [('get_by_name_label', 'Set(host)'),
('list_methods', None),
('get_self', 'String'),
('create_uuid', 'String'),
('migrate_update_add', None),
('migrate_update_del', None),
('join_add', None),
('get_structs', 'Map'),
('rsync_structs', 'Map'),
('update_structs', 'Map'),
('set_ha', None),
('get_ha', None),
('start_per', None),
('stop_per', None),
('connect_get_all', 'Map'),
('get_record_lite', 'Set'),
('firewall_allow_ping', bool),
('firewall_deny_ping', bool),
# ('firewall_set_rule', bool),
# ('firewall_del_rule', bool),
('firewall_set_rule_list', bool),
('firewall_del_rule_list', bool),
('bind_outer_ip', bool),
('unbind_outer_ip', bool),
('bind_ip_mac', bool),
('unbind_ip_mac', bool),
('limit_add_class', bool),
('limit_del_class', bool),
('limit_add_ip', bool),
('limit_del_ip', bool),
('route_add_eth', bool),
('route_del_eth', bool),
('add_subnet', bool),
('del_subnet', bool),
('assign_ip_address', 'String'),
('add_port_forwarding', bool),
('del_port_forwarding', bool),
('add_PPTP', bool),
('del_PPTP', bool),
('add_open_vpn', bool),
('del_open_vpn', bool),
('add_IO_limit', bool),
('del_IO_limit', bool),
('check_SR', bool),
('active_SR', bool),
('set_load_balancer', bool),
('migrate_template', 'VM'),
]
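    # The attr/method/func tables above are consumed by _decorate(), which wraps
    # every listed call with the session/handle validators and generates the
    # trivial get_by_uuid/get_uuid/get_all_records helpers for each API class.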
# add by wufan
def host_connect_get_all(self, session):
'''
@author: wuyuewen
@summary: Internal method.
'''
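        # Aggregate host/VM/SR records from every host in the pool: ask each member
        # over RPC for its records (skipping SR uuids already collected) and then
        # trigger an asynchronous SR_mount_all on that host.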
host_all_records = {}
VM_all_records = {}
SR_all_records = {}
sr_uuids = []
import datetime
for host_ref in BNPoolAPI.get_hosts():
remote_ip = BNPoolAPI.get_host_ip(host_ref)
log.debug('=================get all record remote ip: %s' % remote_ip)
time1 = datetime.datetime.now()
# get all records on host
all_records = xen_rpc_call(remote_ip, "host_get_vm_sr_record", host_ref, sr_uuids).get('Value')
if all_records :
host_all_records.update(all_records.get('host_record', {}))
VM_all_records.update(all_records.get('vm_records', {}))
SR_all_records.update(all_records.get('sr_records', {}))
sr_uuids = SR_all_records.keys()
time2 = datetime.datetime.now()
log.debug('get all records of host: cost time %s' % (time2-time1))
# sr mount_all
xen_rpc_call(remote_ip, 'Async.SR_mount_all')
time3 = datetime.datetime.now()
log.debug('mount_all on host: cost time %s' % (time3-time2))
res_records = {'host_records': host_all_records, 'VM_records': VM_all_records, 'SR_records':SR_all_records}
return xen_api_success(res_records)
# get the host,vm and sr records on the host
def host_get_vm_sr_record(self, session, host_ref, sr_uuids):
'''
@author: wuyuewen
@summary: Internal method.
'''
log.debug('get_host_vm_sr_records')
host_record = {}
vm_records = {}
sr_records = {}
log.debug('get host record')
host_record[host_ref] = self._host_get_record(session, host_ref).get('Value')
import datetime
time1 = datetime.datetime.now()
# get vm records
#all_vms = self._VM_get_all(session).get('Value')
all_vms = [d.get_uuid() for d in XendDomain.instance().list('all')
if d.get_uuid() != DOM0_UUID]
for vm_ref in all_vms:
try:
vm_res = self._VM_get_record(session, vm_ref)
if vm_res.get('Status') == 'Success':
vm_record = vm_res.get('Value')
vm_records[vm_ref] = vm_record
except Exception, exn:
log.debug(exn)
time2 = datetime.datetime.now()
log.debug('get all vm records, cost time: %s' % (time2 - time1))
# get sr records
#all_srs = self._SR_get_all(session).get('Value')
xennode = XendNode.instance()
srs = xennode.get_all_sr_uuid()
all_srs = list(set(srs).difference(set(sr_uuids)))
for sr_ref in all_srs:
try:
# sr_res = self._SR_get_record(session, sr_ref)
# if sr_res.get('Status') == 'Success':
# sr_record = sr_res.get('Value')
sr = xennode.get_sr(sr_ref)
if sr:
sr_records[sr_ref] = sr.get_record()
except Exception, exn:
log.debug(exn)
time3 = datetime.datetime.now()
log.debug('get all sr records, cost time: %s' % (time3 - time2))
all_records = {'host_record' : host_record, 'vm_records': vm_records, 'sr_records': sr_records}
#log.debug('================> sr records')
#log.debug(sr_records)
return xen_api_success(all_records)
def host_set_ha(self, session, host_ref, value):
'''
@author: wuyuewen
        @summary: Enable or disable Host HA.
@param session: session of RPC.
@param host_ref: Host's uuid
@param value: True | False
@return: void
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_set_ha(session, host_ref, value)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "host_set_ha", host_ref, value)
def _host_set_ha(self, session, host_ref, value):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_set_ha
'''
BNPoolAPI._ha_enable = value
ha_config = "false"
if BNPoolAPI._ha_enable:
ha_config = "true"
f = open("/etc/xen/pool_ha_enable", "w")
f.write(ha_config)
f.close()
return xen_api_success_void()
def host_get_ha(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Host HA.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: True | False
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_ha(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "host_get_ha", host_ref)
def _host_get_ha(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_ha
'''
return xen_api_success(BNPoolAPI._ha_enable)
def host_start_per(self, session, host_ref):
'''
@author: wuyuewen
@summary: Start Host performance monitor function.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: void
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_start_per(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "host_start_per", host_ref)
def _host_start_per(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_start_per
'''
from xen.xend import Performance
self.rp = Performance.RunPerformance()
self.rp.start()
# Performance.main()
return xen_api_success_void()
def host_stop_per(self, session, host_ref):
'''
@author: wuyuewen
@summary: Stop Host performance monitor function.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: void
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_stop_per(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "host_stop_per", host_ref)
def _host_stop_per(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_stop_per
'''
self.rp.stop()
# Performance.main()
return xen_api_success_void()
def host_get_structs(self, session):
'''
@author: wuyuewen
@summary: Internal method.
'''
#self.host_init_structs()
host_ref = XendNode.instance().uuid
struct = None
try:
struct = BNPoolAPI._host_structs
except KeyError:
log.exception('key error')
return xen_api_success(struct)
"""
collect the latest state on the machine
return as host_structs
"""
def host_rsync_structs(self, session):
'''
@author: wuyuewen
@summary: Internal method.
'''
#self.host_init_structs()
#host_ref = XendNode.instance().uuid
#struct = None
#try:
# struct = BNPoolAPI._host_structs
#except KeyError:
# log.exception('key error')
struct = self.host_init_structs()
return xen_api_success(struct)
def host_update_structs(self, session):
"""
        @summary: Update this host's state structs.
        NOTE: do not call this function when the host is the master,
              because it only updates the state of the current host.
"""
structs = self.host_init_structs()
BNPoolAPI._host_structs = structs
return xen_api_success(structs)
def host_get_SRs(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get SRs(storage) attached to Host.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: list of SRs
@rtype: dict.
'''
return xen_api_success(BNPoolAPI._host_structs[host_ref]['SRs'])
def host_set_SRs(self, session, host_ref, srs):
'''
@author: wuyuewen
@summary: Internal method.
'''
XendNode.instance().set_SRs(srs)
return xen_api_success_void()
'''
    check whether the given SR uuids are already in use
    return : (is_valid, need_to_create)
        all three SR objects exist and uuid & location match -> (True, False), no need to create
        no SR object exists yet -> (True, True), need to create
        otherwise -> (False, False), do not create
'''
def _host_check_SR_valid(self, session, uuid_to_location):
'''
@author: wuyuewen
@summary: Internal method.
'''
all_srs = XendNode.instance().get_all_sr_uuid()
sr_uuids = uuid_to_location.keys()
sr_locations = uuid_to_location.values()
sr_uuid_in_memory = [] # sr uuid of the location in memory
for sr_uuid in all_srs:
sr = XendNode.instance().get_sr(sr_uuid)
if sr.location in sr_locations:
sr_uuid_in_memory.append(sr_uuid)
if len(set(sr_uuid_in_memory)) != 0:
uuid_check = list(set(sr_uuid_in_memory) & set(sr_uuids))
if len(uuid_check) == 3: # uuid and location matches
return (True, False)
else: # uuid and location not match
return (False, False)
assert len(sr_uuids) == 3
existed_srs = list(set(all_srs) & set(sr_uuids))
log.debug('existed srs: %s' % existed_srs)
if len(existed_srs) == 0:
return (True, True)
else:
return (False, False)
# for sr_uuid, sr_location in uuid_to_location.items():
# sr = XendNode.instance().get_sr(sr_uuid)
# log.debug('sr uuid (%s) , sr_location(%s), sr_in memeory location(%s)' % (sr_uuid, sr_location, sr.location))
# if cmp(sr_location, sr.location) != 0:
# need_to_create = False
# return (False, need_to_create)
# return (True, False)
'''
    given a filesystem type and an sr_type,
    return the SR type string needed when creating the SR
'''
def _host_get_sr_type(self, fs_type, sr_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
API_ALL_TYPE = ['iso', 'ha', 'disk']
API_SR_TYPE = {'iso': 'gpfs_iso', 'ha': 'gpfs_ha'}
API_FS_TYPE = {'mfs': 'mfs', 'bfs': 'mfs', 'ocfs2': 'ocfs2', 'local_ocfs2': 'ocfs2', 'ceph': 'ceph'} # sr_type : disk
if sr_type not in API_ALL_TYPE:
return ''
# sr type is iso or ha
if sr_type in API_SR_TYPE:
type = API_SR_TYPE.get(sr_type, '')
# sr type is disk
else:
type = API_FS_TYPE.get(fs_type, '')
log.debug('sr object type: %s' % type)
return type
'''
create sr object on host
'''
def host_create_SR_object(self, session, sr_uuid, path, fs_type, sr_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
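        # Build the SR device config (uuid plus a '<path>/<sr_type>' location), derive
        # a name label from the type and the uuid prefix, then ask XendNode to create
        # the SR object; returns True on success, False otherwise.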
type = self._host_get_sr_type(fs_type, sr_type)
if not type:
log.debug('sr type( %s %s) error!' %(fs_type, sr_type))
return False
location = '%s/%s' %(path, sr_type)
deviceConfig = {}
deviceConfig['uuid'] = sr_uuid
deviceConfig['location'] = location
namelabel ='%s_%s' % (sr_type, sr_uuid[:8])
nameDescription = location
try:
uuid = XendNode.instance().create_sr(deviceConfig, '', namelabel, nameDescription, type, '', True, {})
assert sr_uuid == uuid
log.debug('create sr (%s %s %s %s) succeed!' % (sr_uuid, path, fs_type, sr_type))
return True
except Exception, exn:
log.debug(exn)
return False
'''
    after host_check_SR returns True, create the SR objects in memory for use
'''
# def host_active_SR(self, session, host_ref, disk_uuid, iso_uuid, ha_uuid, path, fs_type):
# '''
# @author: wuyuewen
# @summary: Internal method.
# '''
# if BNPoolAPI._isMaster:
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._host_active_SR(session, disk_uuid, iso_uuid, ha_uuid, path, fs_type)
# else:
# remote_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(remote_ip, 'host_active_SR', host_ref, disk_uuid, iso_uuid, ha_uuid, path, fs_type)
# else:
# return self._host_active_SR(session, disk_uuid, iso_uuid, ha_uuid, path, fs_type)
def host_active_SR(self, session, disk_uuid, iso_uuid, ha_uuid, path, fs_type):
log.debug('call xenapi>>>>>>host active SR')
srs = XendNode.instance().get_all_sr_uuid()
# log.debug('XendNode get srs>>>>>>>>')
# log.debug(srs)
uuid_to_location = {}
uuid_to_location[disk_uuid] = '%s/disk' % path
uuid_to_location[iso_uuid] = '%s/iso' % path
uuid_to_location[ha_uuid] = '%s/ha' % path
res, need_to_create = self._host_check_SR_valid(session, uuid_to_location)
log.debug('host_check_SR_valid: %s need to create: %s' % (res, need_to_create))
if not res: # False
return xen_api_success(False)
if not need_to_create:
return xen_api_success(True)
try:
if not self.host_create_SR_object(session, disk_uuid, path, fs_type, 'disk'):
return xen_api_success(False)
if not self.host_create_SR_object(session, iso_uuid, path, fs_type, 'iso'):
return xen_api_success(False)
if not self.host_create_SR_object(session, ha_uuid, path, fs_type, 'ha'):
return xen_api_success(False)
return xen_api_success(True)
except Exception, exn:
log.debug(exn)
return xen_api_success(False)
'''
check whether sr(ip, path, type) is mounted on the host(ip)
cases:
    mfs/bfs need an ip, but if the value isn't given the ip will not be checked
    ocfs2 does not need an ip; if ip is not '', return False
'''
# def host_check_SR(self, session, host_ref, ip, path, sr_type):
# '''
# @author: wuyuewen
# @summary: Internal method.
# '''
# log.debug('host_check_SR....')
# if BNPoolAPI._isMaster:
# if cmp(host_ref, XendNode.instance().uuid) == 0:
# return self._host_check_SR(session, ip, path, sr_type)
# else:
# remote_ip = BNPoolAPI.get_host_ip(host_ref)
# return xen_rpc_call(remote_ip, 'host_check_SR', host_ref, ip, path, sr_type)
# else:
# return self._host_check_SR(session, ip, path, sr_type)
def host_check_SR(self, session, ip, path, sr_type):
'''
@author: wuyuewen
@summary: Internal method.
'''
is_sr_mount = XendNode.instance()._SR_check_is_mount(ip, path, sr_type)
if is_sr_mount:
return xen_api_success(True)
else:
return xen_api_success(False)
def host_create_uuid(self, session):
'''
@deprecated: not used
'''
return xen_api_success(genuuid.gen_regularUuid())
def host_get_self(self, session):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().uuid)
def host_get_by_uuid(self, session, uuid):
'''
@author: wuyuewen
@summary: Get Host by uuid.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: Host
@rtype: dict.
@raise xen_api_error: XEND_ERROR_UUID_INVALID
'''
if uuid not in BNPoolAPI.get_hosts():
XEND_ERROR_UUID_INVALID.append(type(uuid).__name__)
XEND_ERROR_UUID_INVALID.append(uuid)
return xen_api_error(XEND_ERROR_UUID_INVALID)
return xen_api_success(uuid)
def host_get_in_pool(self, session, host_ref):
'''
@author: wuyuewen
@summary: Check if Host in Pool.
'''
return xen_api_success(BNPoolAPI._inPool)
def host_set_in_pool(self, session, host_ref, is_in):
'''
@deprecated: not used
'''
BNPoolAPI._inPool = is_in
return xen_api_success_void()
def host_get_is_Master(self, session, host_ref):
'''
@author: wuyuewen
@summary: Check if Host is master.
'''
return xen_api_success(BNPoolAPI._isMaster)
def host_set_is_Master(self, session, host_ref, master):
'''
@deprecated: not used
'''
BNPoolAPI._isMaster = master
return xen_api_success_void()
def host_get_is_Backup(self, session, host_ref):
'''
@author: wuyuewen
@summary: Check if Host is backup.
'''
return xen_api_success(BNPoolAPI._isBackup)
def host_set_is_Backup(self, session, host_ref):
'''
@deprecated: not used
'''
#BNPoolAPI._isBackup = backup
BNPoolAPI.pool_make_backup()
return xen_api_success_void()
# host_add_host:
# add another host to this host
    # the first time this method is called, make this host the master node
def host_add_host(self, session, host_ref, slaver_ref, slaver_host_structs):
'''
@deprecated: not used
'''
if BNPoolAPI._host_structs.has_key(slaver_ref):
return xen_api_error("This host has been in the pool")
# become master if not, I'm not sure it should work here
if not BNPoolAPI._isMaster:
log.debug("make master")
BNPoolAPI.pool_make_master()
# update data structs
BNPoolAPI.update_data_struct("host_add", slaver_host_structs)
return xen_api_success_void()
def host_copy(self, session, host_ref, master_ref, host_structs):#, VM_to_Host, consoles_to_VM, sr_to_host):
'''
@deprecated: not used
'''
log.debug('backup start copy')
BNPoolAPI._host_structs = host_structs
# log.debug('%s' % host_structs)
BNPoolAPI.set_master(master_ref)
log.debug('backup finish copy')
return xen_api_success_void()
def host_get_address(self, session, host_ref):
'''
@author: wuyuewen
        @summary: Get Host ip address; if the Host has several ip addresses, return the one set in /etc/xen/setting.conf.
'''
return xen_api_success(BNPoolAPI.get_host_ip(host_ref))
# attributes
def host_get_name_label(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Host's name label.
@param session: session of RPC.
@param host_ref: Host's uuid
        @return: Host's name label
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
log.debug(host_ref)
return self._host_get_name_label(session, host_ref)
else:
log.debug(host_ref)
host_ip = BNPoolAPI.get_host_ip(host_ref)
#log.debug("host ip : " + host_ip)
return xen_rpc_call(host_ip, 'host_get_name_label', host_ref)
def _host_get_name_label(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_name_label
'''
return xen_api_success(XendNode.instance().get_name())
def host_set_name_label(self, session, host_ref, new_name):
'''
@author: wuyuewen
@summary: Set Host's name label.
        @precondition: Only supports English; param has no special characters except "_", "-" and ".".
@param session: session of RPC.
@param host_ref: Host's uuid
@param new_name: new name of Host
@return: True | False
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_set_name_label(session, host_ref, new_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_set_name_label', host_ref, new_name)
def _host_set_name_label(self, session, host_ref, new_name):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_set_name_label
'''
XendNode.instance().set_name(new_name)
XendNode.instance().save()
return xen_api_success_void()
def host_get_name_description(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Host's name description.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: name description of Host
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_name_description(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_name_description', host_ref)
def _host_get_name_description(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_name_description
'''
return xen_api_success(XendNode.instance().get_description())
def host_set_name_description(self, session, host_ref, new_desc):
'''
@author: wuyuewen
@summary: Set Host's name description.
        @precondition: Only supports English; param has no special characters except "_", "-" and ".".
@param session: session of RPC.
@param host_ref: Host's uuid
@param new_desc: new description of Host
@return: True | False
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_set_name_description(session, host_ref, new_desc)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_set_name_description', host_ref, new_desc)
def _host_set_name_description(self, session, host_ref, new_desc):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_set_name_description
'''
XendNode.instance().set_description(new_desc)
XendNode.instance().save()
return xen_api_success_void()
def host_get_other_config(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
return xen_api_success(XendNode.instance().other_config)
def host_set_other_config(self, session, host_ref, other_config):
'''
@author: wuyuewen
@summary: Internal method.
'''
node = XendNode.instance()
node.other_config = dict(other_config)
node.save()
return xen_api_success_void()
def host_add_to_other_config(self, session, host_ref, key, value):
'''
@author: wuyuewen
@summary: Internal method.
'''
node = XendNode.instance()
node.other_config[key] = value
node.save()
return xen_api_success_void()
def host_remove_from_other_config(self, session, host_ref, key):
'''
@author: wuyuewen
@summary: Internal method.
'''
node = XendNode.instance()
if key in node.other_config:
del node.other_config[key]
node.save()
return xen_api_success_void()
def host_get_software_version(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
return xen_api_success(XendNode.instance().xen_version())
def host_get_enabled(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_enabled(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_enabled', host_ref)
def _host_get_enabled(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
return xen_api_success(XendDomain.instance().allow_new_domains())
def host_get_resident_VMs(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
'''
return xen_api_success(XendDomain.instance().get_domain_refs())
def host_get_PIFs(self, session, ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_PIF_refs())
def host_get_PPCIs(self, session, ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_PPCI_refs())
def host_get_PSCSIs(self, session, ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_PSCSI_refs())
def host_get_PSCSI_HBAs(self, session, ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_PSCSI_HBA_refs())
def host_get_host_CPUs(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Host's CPUs uuid.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: list of CPUs uuid
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_host_CPUs(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_host_CPUs', host_ref)
else:
return self._host_get_host_CPUs(session, host_ref)
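    # Dispatch pattern used by most host_* calls in this class: on the pool
    # master, a call whose target host is not this node is forwarded over
    # XML-RPC (xen_rpc_call) to the owning host's IP; local targets -- and all
    # calls made on non-master nodes -- fall through to the matching
    # _-prefixed internal method.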
def _host_get_host_CPUs(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_host_CPUs
'''
return xen_api_success(XendNode.instance().get_host_cpu_refs())
def host_get_host_CPU_record(self, session, host_ref, cpu_ref):
'''
@author: wuyuewen
@summary: Get Host CPU's record.
@param session: session of RPC.
@param host_ref: Host's uuid
@param cpu_ref: Host CPU's uuid
@return: record
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_host_CPU_record(session, cpu_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_host_CPU_record', host_ref, cpu_ref)
else:
return self._host_get_host_CPU_record(session, cpu_ref)
def _host_get_host_CPU_record(self, session, cpu_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_host_CPU_record
'''
return self.host_cpu_get_record(session, cpu_ref)
def host_get_zpool_can_import(self, session, host_ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_zpool_can_import(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_zpool_can_import', host_ref)
else:
return self._host_get_zpool_can_import(session, host_ref)
def _host_get_zpool_can_import(self, session, host_ref):
'''
@deprecated: not used
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_zpool_can_import())
def host_import_zpool(self, session, host_ref, zpool_name):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_import_zpool(session, host_ref, zpool_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_import_zpool', host_ref, zpool_name)
else:
return self._host_import_zpool(session, host_ref, zpool_name)
def _host_import_zpool(self, session, host_ref, zpool_name):
'''
@deprecated: not used
'''
try:
xennode = XendNode.instance()
xennode.import_zpool(zpool_name)
return xen_api_success_void()
except Exception, exn:
return xen_api_error(['ZPOOL_IMPORT_ERROR', zpool_name])
def host_get_metrics(self, _, ref):
'''
@deprecated: not used
'''
if BNPoolAPI._isMaster:
if cmp(ref, XendNode.instance().uuid) == 0:
return self._host_get_metrics(_, ref)
else:
host_ip = BNPoolAPI.get_host_ip(ref)
return xen_rpc_call(host_ip, 'host_get_metrics', ref)
else:
return self._host_get_metrics(_, ref)
def _host_get_metrics(self, _, ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().host_metrics_uuid)
def host_get_capabilities(self, session, host_ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_capabilities())
def host_get_supported_bootloaders(self, session, host_ref):
'''
@deprecated: not used
'''
return xen_api_success(['pygrub'])
def host_get_sched_policy(self, _, host_ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_vcpus_policy())
def host_get_cpu_configuration(self, _, host_ref):
'''
@deprecated: not used
'''
return xen_api_success(XendNode.instance().get_cpu_configuration())
def host_set_logging(self, _, host_ref, logging):
'''
@deprecated: not used
'''
return xen_api_todo()
def host_get_logging(self, _, host_ref):
'''
@deprecated: not used
'''
return xen_api_todo()
# object methods
def host_disable(self, session, host_ref):
'''
@deprecated: not used
'''
XendDomain.instance().set_allow_new_domains(False)
return xen_api_success_void()
def host_enable(self, session, host_ref):
'''
@deprecated: not used
'''
XendDomain.instance().set_allow_new_domains(True)
return xen_api_success_void()
def host_reboot(self, session, host_ref):
'''
@deprecated: not used
'''
if not XendDomain.instance().allow_new_domains():
return xen_api_error(XEND_ERROR_HOST_RUNNING)
return xen_api_error(XEND_ERROR_UNSUPPORTED)
def host_shutdown(self, session, host_ref):
'''
@deprecated: not used
'''
if not XendDomain.instance().allow_new_domains():
return xen_api_error(XEND_ERROR_HOST_RUNNING)
return xen_api_error(XEND_ERROR_UNSUPPORTED)
def host_dmesg(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Xen dmesg information.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: dmesg of Xen
@rtype: dict.
'''
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_dmesg(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_dmesg', host_ref)
def _host_dmesg(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_dmesg
'''
return xen_api_success(XendDmesg.instance().info())
def host_dmesg_clear(self, session, host_ref):
return xen_api_success(XendDmesg.instance().clear())
def host_get_log(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Xend log buffer.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: log of Xend
@rtype: dict.
'''
log_file = open(XendLogging.getLogFilename())
log_buffer = log_file.read()
log_buffer = log_buffer.replace('\b', ' ')
log_buffer = log_buffer.replace('\f', '\n')
log_file.close()
return xen_api_success(log_buffer)
def host_send_debug_keys(self, _, host_ref, keys):
'''
@deprecated: not used
'''
node = XendNode.instance()
node.send_debug_keys(keys)
return xen_api_success_void()
def host_get_all_usb_scsi(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get all usb scsi devices on this Host.
@param session: session of RPC.
@param host_ref: Host's uuid
        @return: list of usb scsi devices
        @rtype: dict.
        @raise xen_api_error: CANNOT_GET_USB_SCSI
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_all_usb_scsi(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_all_usb_scsi', host_ref)
else:
return self._host_get_all_usb_scsi(session, host_ref)
def _host_get_all_usb_scsi(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_all_usb_scsi
'''
try:
node = XendNode.instance()
fibers = node.get_usb_scsi()
return xen_api_success(fibers)
except Exception, exn:
log.error(exn)
            return xen_api_error(['CANNOT_GET_USB_SCSI', exn])
def host_get_all_fibers(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get all fibers devices on this Host.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: list of fibers
@rtype: dict.
@raise xen_api_error: CANNOT_GET_FIBERS
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_all_fibers(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_all_fibers', host_ref)
else:
return self._host_get_all_fibers(session, host_ref)
def _host_get_all_fibers(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_all_fibers
'''
try:
node = XendNode.instance()
fibers = node.get_fibers()
return xen_api_success(fibers)
except Exception, exn:
log.error(exn)
return xen_api_error(['CANNOT_GET_FIBERS', exn])
def host_get_avail_usb_scsi(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get all available usb scsi devices on this Host.
@param session: session of RPC.
@param host_ref: Host's uuid
        @return: list of available usb scsi devices
        @rtype: dict.
        @raise xen_api_error: CANNOT_GET_AVAIL_USB_SCSI
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_avail_usb_scsi(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_avail_usb_scsi', host_ref)
else:
return self._host_get_avail_usb_scsi(session, host_ref)
def _host_get_avail_usb_scsi(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_avail_usb_scsi
'''
try:
node = XendNode.instance()
response = self._host_get_all_usb_scsi(session, host_ref)
if cmp(response['Status'], 'Failure') == 0:
return response
else:
fibers = response.get('Value')
avail_fibers = []
if fibers and isinstance(fibers, list):
log.debug(fibers)
for fiber in fibers:
if not node.is_usb_scsi_in_use(fiber):
avail_fibers.append(fiber)
return xen_api_success(avail_fibers)
except Exception, exn:
log.error(exn)
return xen_api_error(['CANNOT_GET_AVAIL_USB_SCSI', exn])
def host_get_avail_fibers(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get all available fiber devices on this Host.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: list of fibers
@rtype: dict.
@raise xen_api_error: CANNOT_GET_AVAIL_FIBERS
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_avail_fibers(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_avail_fibers', host_ref)
else:
return self._host_get_avail_fibers(session, host_ref)
def _host_get_avail_fibers(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_avail_fibers
'''
try:
node = XendNode.instance()
response = self._host_get_all_fibers(session, host_ref)
if cmp(response['Status'], 'Failure') == 0:
return response
else:
fibers = response.get('Value')
avail_fibers = []
if fibers and isinstance(fibers, list):
log.debug(fibers)
for fiber in fibers:
if not node.is_fiber_in_use(fiber):
avail_fibers.append(fiber)
return xen_api_success(avail_fibers)
except Exception, exn:
log.error(exn)
return xen_api_error(['CANNOT_GET_AVAIL_FIBERS', exn])
def host_get_bridges(self, session, host_ref):
'''
@author: wuyuewen
        @summary: Get all network bridges in use on this Host.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: list of network bridges
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_bridges(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_bridges', host_ref)
else:
return self._host_get_bridges(session, host_ref)
def _host_get_bridges(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_bridges
'''
node = XendNode.instance()
return xen_api_success(node.get_bridges())
def host_get_interfaces(self, session, host_ref):
'''
@author: wuyuewen
        @summary: Get all network interfaces in use on this Host.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: list of network interfaces
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_interfaces(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_get_interfaces', host_ref)
else:
return self._host_get_interfaces(session, host_ref)
def _host_get_interfaces(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_interfaces
'''
node = XendNode.instance()
return xen_api_success(node.get_interfaces())
def host_enable_vxlan(self, session, host_ref, ovs_name):
'''
@author: wuyuewen
@summary: Enable vxlan and add ovs bridge to vxlan group
@precondition: ovs bridge exists
@param session: session of RPC.
@param host_ref: Host's uuid
@param ovs_name: name of ovs bridge
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_enable_vxlan(session, host_ref, ovs_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_enable_vxlan', host_ref, ovs_name)
else:
return self._host_enable_vxlan(session, host_ref, ovs_name)
def _host_enable_vxlan(self, session, host_ref, ovs_name):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_enable_vxlan
'''
xennode = XendNode.instance()
return xen_api_success(xennode.enable_vxlan(ovs_name))
def host_disable_vxlan(self, session, host_ref, ovs_name):
'''
@author: wuyuewen
        @summary: Disable vxlan on the given ovs bridge.
@precondition: ovs bridge exists
@param session: session of RPC.
@param host_ref: Host's uuid
@param ovs_name: name of ovs bridge
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_disable_vxlan(session, host_ref, ovs_name)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, 'host_disable_vxlan', host_ref, ovs_name)
else:
return self._host_disable_vxlan(session, host_ref, ovs_name)
def _host_disable_vxlan(self, session, host_ref, ovs_name):
'''
@author: wuyuewen
@summary: Internal method.
        @see: host_disable_vxlan
'''
xennode = XendNode.instance()
return xen_api_success(xennode.disable_vxlan(ovs_name))
def host_get_record(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Host record.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: Host record
@rtype: dict.
'''
#log.debug('=================host_get_record:%s' % host_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_record(session, host_ref)
else:
host_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(host_ip, "host_get_record", host_ref)
def _host_get_record(self, session, host_ref):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_get_record
'''
node = XendNode.instance()
dom = XendDomain.instance()
host_ip_rsp = self.host_get_address(session, host_ref)
if host_ip_rsp.has_key('Value'):
address = host_ip_rsp.get('Value')
record = {'uuid': node.uuid,
'name_label': node.name,
'name_description': '',
'software_version': node.xen_version(),
'enabled': XendDomain.instance().allow_new_domains(),
'other_config': node.other_config,
'resident_VMs': dom.get_domain_refs(),
'host_CPUs': node.get_host_cpu_refs(),
'cpu_configuration': node.get_cpu_configuration(),
'metrics': node.host_metrics_uuid,
'memory_total' : self._host_metrics_get_memory_total(),
'memory_free' : self._host_metrics_get_memory_free(),
'capabilities': node.get_capabilities(),
'supported_bootloaders': ['pygrub'],
'sched_policy': node.get_vcpus_policy(),
'logging': {},
'address' : getip.get_current_ipaddr(),
'is_master' : BNPoolAPI.get_is_master(),
'pool' : BNPoolAPI.get_uuid(),
'in_pool' : BNPoolAPI.get_in_pool(),
}
return xen_api_success(record)
def host_get_record_lite(self, session):
'''
@author: wuyuewen
@summary: Get Host lite record.
@param session: session of RPC.
@return: Host record
@rtype: dict.
'''
node = XendNode.instance()
record_lite = {'uuid': node.uuid,
'in_pool' : BNPoolAPI.get_in_pool(),
}
return xen_api_success(record_lite)
def host_firewall_set_rule_list(self, session, json_obj, ip=None):
'''
@author: wuyuewen
        @summary: Set firewall rules on the Gateway VM. The Gateway VM is defined in /etc/xen/setting.conf or given via the <ip> parameter.
@param session: session of RPC.
@param json_obj: firewall rules of json object type
@param ip: Gateway's ip
@return: True | False
@rtype: dict.
'''
flag = Netctl.set_firewall_rule(json_obj, ip)
return xen_api_success(flag)
def host_firewall_del_rule_list(self, session, ip_list, rule_list):
'''
@deprecated: not used
'''
import json
ips = json.loads(ip_list)
rules = json.loads(rule_list)
log.debug('host_firewall_del_list>>>>>')
log.debug(rules)
log.debug(ips)
flag = True
# self.__network_lock__.acquire()
# try:
for ip in ips:
for rule in rules:
protocol = rule.get('protocol', '').lower()
ip_segment = rule.get('IP', '')
if cmp(protocol, 'icmp') == 0:
flag = Netctl.firewall_deny_ping(ip, ip_segment) # to do
elif protocol in ['tcp', 'udp']: # tcp, udp
start_port = rule.get('startPort', '')
end_port = rule.get('endPort', '')
if not start_port or not end_port:
continue
# port = '%s:%s' % (start_port, end_port)
port = end_port
flag = Netctl.del_firewall_rule(protocol, ip, ip_segment, port)
if not flag:
return xen_api_success(flag)
return xen_api_success(flag)
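    # Sketch of the payloads handled above, inferred from the keys read in the
    # loop (the real schema is defined by the callers and by Netctl, so treat
    # this as an assumption rather than a spec):
    #   ip_list   = '["10.0.0.8"]'
    #   rule_list = '[{"protocol": "tcp", "IP": "0.0.0.0/0",
    #                  "startPort": "80", "endPort": "80"}]'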
def host_bind_outer_ip(self, session, intra_ip, outer_ip, eth):
'''
@author: wuyuewen
        @summary: Set intranet/outernet IP binding (NAT) rules on the Gateway VM. The Gateway VM is defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param intra_ip: intranet ip
@param outer_ip: outernet ip
@param eth: outernet net port on Gateway VM
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.add_nat(intra_ip, outer_ip, eth)
log.debug(retv)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_unbind_outer_ip(self, session, intra_ip, outer_ip, eth):
'''
@author: wuyuewen
        @summary: Remove intranet/outernet IP binding (NAT) rules on the Gateway VM. The Gateway VM is defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param intra_ip: intranet ip
@param outer_ip: outernet ip
@param eth: outernet net port on Gateway VM
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_nat(intra_ip, outer_ip, eth)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_bind_ip_mac(self, session, json_obj):
'''
@author: wuyuewen
        @summary: Set intranet IP/MAC binding (DHCP) rules on the Gateway VM. The Gateway VM is defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param json_obj: intra/mac bonding rules of json object type
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
# log.debug('host bind ip mac>>>>>>>>>')
retv = Netctl.add_mac_bind(json_obj)
# if retv:
# Netctl.set_firewall_rule('tcp', ip, '', '22')
# Netctl.set_firewall_rule('tcp', ip, '', '3389')
# Netctl.firewall_allow_ping(ip, '')
# log.debug('excute host bind ip mac:---> %s' % retv)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
# Netctl.del_mac_bind(json_obj)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_unbind_ip_mac(self, session, json_obj):
'''
@author: wuyuewen
        @summary: Remove intranet IP/MAC binding (DHCP) rules on the Gateway VM. The Gateway VM is defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param json_obj: intra/mac bonding rules of json object type
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_mac_bind(json_obj)
# if retv:
# Netctl.del_firewall_rule('tcp', ip, '', '22')
# Netctl.del_firewall_rule('tcp', ip, '', '3389')
# Netctl.firewall_deny_ping(ip, '')
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_limit_add_class(self, session, class_id, speed):
'''
@author: wuyuewen
@summary: Add network speed limit class on Gateway VM. Gateway VM defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param class_id: class id
@param speed: limit speed
@return: True | False
@rtype: dict.
'''
try:
retv = Netctl.limit_add_class(class_id, speed)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
def host_limit_del_class(self, session, class_id):
'''
@author: wuyuewen
@summary: Del network speed limit class on Gateway VM. Gateway VM defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param class_id: class id
@return: True | False
@rtype: dict.
'''
try:
retv = Netctl.limit_del_class(class_id)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
def host_limit_add_ip(self, session, ip, class_id):
'''
@author: wuyuewen
@summary: Add ip to a network speed limit class on Gateway VM. Gateway VM defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param ip: ip for speed limit
@param class_id: class id
@return: True | False
@rtype: dict.
'''
try:
retv = Netctl.limit_add_ip(ip, class_id)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
def host_limit_del_ip(self, session, ip):
'''
@author: wuyuewen
@summary: Delete ip on a network speed limit class on Gateway VM. Gateway VM defined in /etc/xen/setting.conf.
@param session: session of RPC.
@param ip: ip for speed limit
@return: True | False
@rtype: dict.
'''
try:
retv = Netctl.limit_del_ip(ip)
return xen_api_success(retv)
except Exception, exn:
log.exception(exn)
return xen_api_success(False)
def host_route_add_eth(self, session, ip, eth, route_ip, netmask):
'''
@author: wuyuewen
@summary: Add a new network interface in virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@param eth: interface ethX
@param route_ip: virtual network gateway ip
@param netmask: netmask of interface
@return: True | False
@rtype: dict.
'''
try:
import httplib2
h = httplib2.Http(".cache")
headers = {'x-bws-mac': eth, 'x-bws-ip-address' : route_ip, 'x-bws-netmask' : netmask}
log.debug('route add eth, <ip><eth><route_ip><netmask>=%s, %s, %s, %s' % (ip, eth, route_ip, netmask))
resp, content = h.request("http://%s/Route" % ip, "POST", headers=headers)
status = resp.get('status', '')
if status == '200':
return xen_api_success(True)
else:
log.error("route add eth restful failed! Status: %s, record: %s" % (status, str(headers)))
return xen_api_success(False)
except Exception, exn:
log.exception("route add eth restful exception! %s" % exn)
return xen_api_success(False)
def host_route_del_eth(self, session, ip, eth):
'''
@author: wuyuewen
        @summary: Delete a network interface from the virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@param eth: interface ethX
@return: True | False
@rtype: dict.
'''
try:
import httplib2
h = httplib2.Http(".cache")
headers = {'x-bws-mac': eth}
log.debug('route del eth, <ip><eth>=%s, %s' % (ip, eth))
resp, content = h.request("http://%s/Route" % ip, "DELETE", headers=headers)
status = resp.get('status', '')
if status == '200':
return xen_api_success(True)
else:
log.error("route del eth restful failed! Status: %s, record: %s" % (status, str(headers)))
return xen_api_success(False)
except Exception, exn:
log.exception("route del eth restful exception! %s" % exn)
return xen_api_success(False)
def host_set_load_balancer(self, session, ip, json_obj):
'''
@author: wuyuewen
        @summary: Initialize the load balancer VM with the given config <json_obj>; the new config replaces the old one.
@param session: session of RPC.
@param ip: ip of load balancer
@param json_obj: config
@return: True | False
@rtype: dict.
'''
try:
import httplib2
log.debug('set load balancer, <ip><rules> = %s,%s' % (ip, json_obj))
h = httplib2.Http(".cache")
resp, content = h.request("http://%s/LoadBalancer" % ip, "PUT", body=json_obj)
status = resp.get('status', '')
if status == '200':
return xen_api_success(True)
else:
log.error("set load balancer restful failed! Status: %s, record: %s" % (status, json_obj))
return xen_api_success(False)
except Exception, exn:
log.exception("set load balancer restful exception! %s" % exn)
return xen_api_success(False)
def host_add_subnet(self, session, ip, json_obj):
'''
@author: wuyuewen
@summary: Add DHCP rules on subnet gateway.
@param session: session of RPC.
@param ip: ip of subnet gateway
@param json_obj: DHCP config
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.add_subnet(ip, json_obj)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
# Netctl.del_subnet(json_obj)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_del_subnet(self, session, ip, json_obj):
'''
@author: wuyuewen
@summary: Delete DHCP rules on subnet gateway.
@param session: session of RPC.
@param ip: ip of subnet gateway
@param json_obj: DHCP config
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_subnet(ip, json_obj)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_assign_ip_address(self, session, ip, mac, subnet):
'''
@author: wuyuewen
        @summary: Assign an IP address to a MAC on the subnet gateway.
@param session: session of RPC.
@param ip: ip
@param mac: mac
@param subnet: subnet
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.assign_ip_address(ip, mac, subnet)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success_void()
finally:
self.__network_lock__.release()
def host_add_port_forwarding(self, session, ip, protocol, internal_ip, internal_port, external_ip, external_port):
'''
@author: wuyuewen
@summary: Add a new port forwarding rule on virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@param protocol: tcp/udp
@param internal_ip: internal ip
@param internal_port: internal port
@param external_ip: external ip
@param external_port: external port
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.add_port_forwarding(ip, protocol, internal_ip, internal_port, external_ip, external_port)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_del_port_forwarding(self, session, ip, protocol, internal_ip, internal_port, external_ip, external_port):
'''
@author: wuyuewen
@summary: Delete a port forwarding rule on virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@param protocol: tcp/udp
@param internal_ip: internal ip
@param internal_port: internal port
@param external_ip: external ip
@param external_port: external port
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_port_forwarding(ip, protocol, internal_ip, internal_port, external_ip, external_port)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_add_PPTP(self, session, ip, json_obj):
'''
@author: wuyuewen
@summary: Add a PPTP(Point to Point Tunneling Protocol) rule on virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@param json_obj: PPTP rule of json object type
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.add_PPTP(ip, json_obj)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
# Netctl.del_subnet(json_obj)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_del_PPTP(self, session, ip):
'''
@author: wuyuewen
        @summary: Delete a PPTP(Point to Point Tunneling Protocol) rule on virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_PPTP(ip)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_add_open_vpn(self, session, ip, json_obj):
'''
@author: wuyuewen
        @summary: Add an open vpn rule and restart the service on the virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@param json_obj: open vpn rule of json object type
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.add_open_vpn(ip, json_obj)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
# Netctl.del_subnet(json_obj)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_del_open_vpn(self, session, ip):
'''
@author: wuyuewen
@summary: Delete open vpn rule and restart service on virtual route.
@param session: session of RPC.
@param ip: ip of virtual route
@return: True | False
@rtype: dict.
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_open_vpn(ip)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_add_IO_limit(self, session, internal_ip, speed):
'''
@deprecated: not used
'''
self.__network_lock__.acquire()
try:
retv = Netctl.add_IO_limit(internal_ip, speed)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
# Netctl.del_subnet(json_obj)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_del_IO_limit(self, session, ip):
'''
@deprecated: not used
'''
self.__network_lock__.acquire()
try:
retv = Netctl.del_IO_limit(ip)
return xen_api_success(retv)
except Exception, exn:
log.debug('exception>>>>>>>')
log.exception(exn)
return xen_api_success(False)
finally:
self.__network_lock__.release()
def host_migrate_template(self, session, vm_ref, new_uuid, dest_master_ip):
'''
@author: wuyuewen
        @summary: Copy a template from one Pool to another. Only the template's config file is
                copied, not its disk, so both Pools end up with templates that share the same disk.
                WARNING: Do not power on both templates at the same time (same disk, write conflict).
@param session: session of RPC.
@param vm_ref: source template uuid
@param new_uuid: new uuid of clone template
@param dest_master_ip: destination Pool's master ip
@return: True | False
@rtype: dict.
'''
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_vm(vm_ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_migrate_template(session, vm_ref, new_uuid, dest_master_ip)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'host_migrate_template', vm_ref, new_uuid, dest_master_ip)
else:
return self._host_migrate_template(session, vm_ref, new_uuid, dest_master_ip)
def _host_migrate_template(self, session, vm_ref, new_uuid, dest_master_ip):
'''
@author: wuyuewen
@summary: Internal method.
@see: host_migrate_template
'''
xendom = XendDomain.instance()
dominfo = xendom.get_vm_by_uuid(vm_ref)
vdis = self._VDI_get_by_vm(session, vm_ref).get('Value')
vm_struct = dominfo.getXenInfo()
if vdis:
for vdi in vdis:
vdi_struct = self._VDI_get_record(session, vdi).get('Value')
log.debug(vdi_struct)
xen_rpc_call(dest_master_ip, 'VDI_create', vdi_struct, False)
if vm_struct:
vm_struct['uuid'] = new_uuid
# vm_struct['name_label'] = str(vm_struct.get('name_label'))
            log.debug('_host_migrate_template')
log.debug(vm_struct)
return xen_rpc_call(dest_master_ip, 'VM_create_from_vmstruct', vm_struct)
else:
            return xen_api_error(['host_migrate_template', 'VM: %s' % vm_ref])
def host_gen_license(self, session, host_ref, period):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_gen_license(session, host_ref, period)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'host_gen_license', host_ref, period)
else:
return self._host_gen_license(session, host_ref, period)
def _host_gen_license(self, session, host_ref, period):
return xen_api_success(LicenseUtil.gen_license(period))
def host_verify_license(self, session, host_ref, license_str):
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_verify_license(session, host_ref, license_str)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'host_verify_license', host_ref, license_str)
else:
return self._host_verify_license(session, host_ref, license_str)
def _host_verify_license(self, session, host_ref, license_str):
return xen_api_success(LicenseUtil.verify_license(license_str))
def host_get_memory_manufacturer(self, session, host_ref):
'''
@author: wuyuewen
@summary: Get Host's memory manufacturer name.
@param session: session of RPC.
@param host_ref: Host's uuid
@return: memory manufacturer name
@rtype: dict.
'''
if BNPoolAPI._isMaster:
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_get_memory_manufacturer(session, host_ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
return xen_rpc_call(remote_ip, 'host_get_memory_manufacturer', host_ref)
else:
return self._host_get_memory_manufacturer(session, host_ref)
def _host_get_memory_manufacturer(self, session, host_ref):
'''
@author: wuyuewen
        @summary: Internal method.
        @see: host_get_memory_manufacturer
'''
xennode = XendNode.instance()
return xen_api_success(xennode.get_memory_manufacturer())
def host_tmem_thaw(self, _, host_ref, cli_id):
node = XendNode.instance()
try:
node.tmem_thaw(cli_id)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_freeze(self, _, host_ref, cli_id):
node = XendNode.instance()
try:
node.tmem_freeze(cli_id)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_flush(self, _, host_ref, cli_id, pages):
node = XendNode.instance()
try:
node.tmem_flush(cli_id, pages)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_destroy(self, _, host_ref, cli_id):
node = XendNode.instance()
try:
node.tmem_destroy(cli_id)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_list(self, _, host_ref, cli_id, use_long):
node = XendNode.instance()
try:
info = node.tmem_list(cli_id, use_long)
except Exception, e:
return xen_api_error(e)
return xen_api_success(info)
def host_tmem_set_weight(self, _, host_ref, cli_id, value):
node = XendNode.instance()
try:
node.tmem_set_weight(cli_id, value)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_set_cap(self, _, host_ref, cli_id, value):
node = XendNode.instance()
try:
node.tmem_set_cap(cli_id, value)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_set_compress(self, _, host_ref, cli_id, value):
node = XendNode.instance()
try:
node.tmem_set_compress(cli_id, value)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
def host_tmem_query_freeable_mb(self, _, host_ref):
node = XendNode.instance()
try:
pages = node.tmem_query_freeable_mb()
except Exception, e:
return xen_api_error(e)
return xen_api_success(pages is None and -1 or pages)
def host_tmem_shared_auth(self, _, host_ref, cli_id, uuid_str, auth):
node = XendNode.instance()
try:
node.tmem_shared_auth(cli_id, uuid_str, auth)
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
# class methods
def host_get_all(self, session):
return xen_api_success(BNPoolAPI.get_hosts())
def host_get_by_name_label(self, session, name):
if BNPoolAPI._isMaster:
result = []
for k in BNPoolAPI.get_hosts():
if cmp(k, XendNode.instance().uuid) == 0:
continue
remote_ip = BNPoolAPI.get_host_ip(k)
res = xen_rpc_call(remote_ip, 'host_get_by_name_label', name)
result.extend(res['Value'])
res = self._host_get_by_name_label(session, name)['Value']
result.extend(res)
return xen_api_success(result)
else:
return self._host_get_by_name_label(session, name)
def _host_get_by_name_label(self, session, name):
result = []
if cmp(name, XendNode.instance().get_name()) == 0:
result.append(XendNode.instance().uuid)
return xen_api_success(result)
def host_list_methods(self, _):
def _funcs():
return [getattr(BNHostAPI, x) for x in BNHostAPI.__dict__]
return xen_api_success([x.api for x in _funcs()
if hasattr(x, 'api')])
# Xen API: Class host_CPU
# ----------------------------------------------------------------
host_cpu_attr_ro = ['host',
'number',
'vendor',
'speed',
'modelname',
'stepping',
'flags',
'utilisation',
'features',
'cpu_pool']
host_cpu_funcs = [('get_unassigned_cpus', 'Set(host_cpu)')]
# attributes
def _host_cpu_get(self, ref, field):
return xen_api_success(
XendNode.instance().get_host_cpu_field(ref, field))
def host_cpu_get_host(self, _, ref):
return xen_api_success(XendNode.instance().uuid)
def host_cpu_get_features(self, _, ref):
return self._host_cpu_get(ref, 'features')
def host_cpu_get_number(self, _, ref):
return self._host_cpu_get(ref, 'number')
def host_cpu_get_vendor(self, _, ref):
return self._host_cpu_get(ref, 'vendor')
def host_cpu_get_speed(self, _, ref):
return self._host_cpu_get(ref, 'speed')
def host_cpu_get_modelname(self, _, ref):
return self._host_cpu_get(ref, 'modelname')
def host_cpu_get_stepping(self, _, ref):
return self._host_cpu_get(ref, 'stepping')
def host_cpu_get_flags(self, _, ref):
return self._host_cpu_get(ref, 'flags')
def host_cpu_get_utilisation(self, _, ref):
return xen_api_success(XendNode.instance().get_host_cpu_load(ref))
def host_cpu_get_cpu_pool(self, _, ref):
return xen_api_success(XendCPUPool.get_cpu_pool_by_cpu_ref(ref))
# object methods
def host_cpu_get_record(self, _, ref):
node = XendNode.instance()
record = dict([(f, node.get_host_cpu_field(ref, f))
for f in self.host_cpu_attr_ro
if f not in ['uuid', 'host', 'utilisation', 'cpu_pool']])
record['uuid'] = ref
record['host'] = node.uuid
record['utilisation'] = node.get_host_cpu_load(ref)
record['cpu_pool'] = XendCPUPool.get_cpu_pool_by_cpu_ref(ref)
return xen_api_success(record)
# class methods
def host_cpu_get_all(self, session):
return xen_api_success(XendNode.instance().get_host_cpu_refs())
def host_cpu_get_unassigned_cpus(self, session):
return xen_api_success(
[ref for ref in XendNode.instance().get_host_cpu_refs()
if len(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) == 0])
# Xen API: Class host_metrics
# ----------------------------------------------------------------
host_metrics_attr_ro = ['memory_total',
'memory_free',
'last_updated']
host_metrics_attr_rw = []
host_metrics_methods = []
def host_metrics_get_all(self, _):
return xen_api_success([XendNode.instance().host_metrics_uuid])
def _host_metrics_get(self, ref, f):
node = XendNode.instance()
return xen_api_success(getattr(node, f)())
def host_metrics_get_record(self, _, ref):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_metrics(ref)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return self._host_metrics_get_record(_, ref)
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
# log.debug(remote_ip)
return xen_rpc_call(remote_ip, 'host_metrics_get_record', ref)
else:
metrics = self._host_metrics_get_record(_, ref)
return metrics
def _host_metrics_get_record(self, _, ref):
metrics = {
'uuid' : ref,
'memory_total' : self._host_metrics_get_memory_total(),
'memory_free' : self._host_metrics_get_memory_free(),
'last_updated' : now(),
}
return xen_api_success(metrics)
def host_metrics_get_memory_total(self, _1, _2):
return xen_api_success(self._host_metrics_get_memory_total())
def host_metrics_get_memory_free(self, _1, _2):
if BNPoolAPI._isMaster:
host_ref = BNPoolAPI.get_host_by_metrics(_2)
if cmp(host_ref, XendNode.instance().uuid) == 0:
return xen_api_success(self._host_metrics_get_memory_free())
else:
remote_ip = BNPoolAPI.get_host_ip(host_ref)
log.debug(remote_ip)
return xen_rpc_call(remote_ip, 'host_metrics_get_memory_free', _2)
else:
return xen_api_success(self._host_metrics_get_memory_free())
def host_metrics_get_last_updated(self, _1, _2):
return xen_api_success(now())
def _host_metrics_get_memory_total(self):
node = XendNode.instance()
return node.xc.physinfo()['total_memory'] * 1024
def _host_metrics_get_memory_free(self):
node = XendNode.instance()
xendom = XendDomain.instance()
doms = xendom.list()
doms_mem_total = 0
for dom in doms:
if cmp(dom.get_uuid(), DOM0_UUID) == 0:
continue
dominfo = xendom.get_vm_by_uuid(dom.get_uuid())
doms_mem_total += dominfo.get_memory_dynamic_max()
# log.debug("doms memory total: " + str(doms_mem_total))
# log.debug("host memory total:" + str(node.xc.physinfo()['total_memory'] * 1024))
return node.xc.physinfo()['total_memory'] * 1024 - doms_mem_total
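    # Note on the free-memory estimate above: it is the host's physical memory
    # total minus the sum of the dynamic-max memory of every guest domain
    # (Domain-0 excluded), so it reflects committed rather than
    # currently-consumed memory.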
class BNHostAPIAsyncProxy:
""" A redirector for Async.Class.function calls to XendAPI
but wraps the call for use with the XendTaskManager.
@ivar xenapi: Xen API instance
@ivar method_map: Mapping from XMLRPC method name to callable objects.
"""
method_prefix = 'Async.'
def __init__(self, xenapi):
"""Initialises the Async Proxy by making a map of all
implemented Xen API methods for use with XendTaskManager.
@param xenapi: XendAPI instance
"""
self.xenapi = xenapi
self.method_map = {}
for method_name in dir(self.xenapi):
method = getattr(self.xenapi, method_name)
if method_name[0] != '_' and hasattr(method, 'async') \
and method.async == True:
self.method_map[method.api] = method
def _dispatch(self, method, args):
"""Overridden method so that SimpleXMLRPCServer will
resolve methods through this method rather than through
inspection.
@param method: marshalled method name from XMLRPC.
@param args: marshalled arguments from XMLRPC.
"""
# Only deal with method names that start with "Async."
if not method.startswith(self.method_prefix):
return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])
# Lookup synchronous version of the method
synchronous_method_name = method[len(self.method_prefix):]
if synchronous_method_name not in self.method_map:
return xen_api_error(['MESSAGE_METHOD_UNKNOWN', method])
method = self.method_map[synchronous_method_name]
# Check that we've got enough arguments before issuing a task ID.
needed = argcounts[method.api]
if len(args) != needed:
return xen_api_error(['MESSAGE_PARAMETER_COUNT_MISMATCH',
self.method_prefix + method.api, needed,
len(args)])
# Validate the session before proceeding
session = args[0]
if not auth_manager().is_session_valid(session):
return xen_api_error(['SESSION_INVALID', session])
# create and execute the task, and return task_uuid
return_type = getattr(method, 'return_type', '<none/>')
task_uuid = XendTaskManager.create_task(method, args,
synchronous_method_name,
return_type,
synchronous_method_name,
session)
return xen_api_success(task_uuid)
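    # Example flow (sketch): an XML-RPC request for e.g. 'Async.host.get_record'
    # has the 'Async.' prefix stripped, is looked up in method_map, and -- once
    # the argument count and session have been validated -- is wrapped into a
    # task by XendTaskManager; the task uuid is returned immediately instead of
    # the synchronous result.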
def instance():
"""Singleton constructror. Use this method instead of the class constructor.
"""
global inst
try:
inst
    except NameError:
inst = BNHostAPI(None)
return inst
| mit | -5,844,853,923,022,985,000 | 36.333909 | 127 | 0.507841 | false |
Syntox32/LittleMan | asm.py | 1 | 1539 | #!/usr/bin/env python3
def runLittleMan(mem):
    # Initialize
ac = 0
pc = 0
running = True
    # Run the instruction cycle
while running:
# Fetch
instr = mem[pc]
pc += 1
# Execute
if instr // 100 == 1: # ADD
ac += mem[instr % 100]
elif instr // 100 == 2: # SUB
ac -= mem[instr % 100]
elif instr // 100 == 3: # STA
mem[instr % 100] = ac
elif instr // 100 == 5: # LDA
ac = mem[instr % 100]
elif instr // 100 == 6: # BRA
pc = instr % 100
elif instr // 100 == 7: # BRZ
if ac == 0:
pc = instr % 100
elif instr // 100 == 8: # BRP
if ac > 0:
pc = instr % 100
elif instr == 901: # INP
ac = int(input('Input: '))
elif instr == 902: # OUT
print(ac)
elif instr == 000: # HLT
running = False
else: # ERROR
print('Error')
running = False
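# Instruction format (as decoded above): each memory cell holds a three-digit
# decimal number whose hundreds digit is the opcode and whose last two digits
# are a memory address, e.g. 106 = ADD the value at address 06 and
# 306 = STA (store accumulator) to address 06; 901/902/000 are INP/OUT/HLT.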
# Read in a number and print it back out
demo1 = [ 901, 902, 0 ]
# Read in two numbers and print their sum
demo2 = [ 901, 306, 901, 106, 902, 0, 0 ]
# Demo ... what is this?
demo3 = [ 505, 902, 105, 305, 601, 1 ]
# Read in a max value and print the Fibonacci numbers less than or equal to the max value
demo4 = [ 901, 321, 518, 902, 519,
902, 118, 320, 221, 817,
520, 902, 519, 318, 520,
319, 606, 0, 1, 1,
0, 0]
runLittleMan(demo1) | mit | -3,397,094,038,716,222,000 | 24.65 | 73 | 0.453186 | false |
cctaylor/googleads-python-lib | googleads/dfa.py | 4 | 7469 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client library for the DoubleClick for Advertisers API."""
__author__ = 'Joseph DiLallo'
import os
import suds.client
import suds.sax.element
import suds.transport
import suds.wsse
import googleads.common
import googleads.errors
class DfaClient(object):
"""A central location to set headers and create web service clients.
Attributes:
username: A string representation of your DFA username.
oauth2_client: A googleads.oauth2.GoogleOAuth2Client used to authorize your
requests.
application_name: An arbitrary string which will be used to identify your
application
https_proxy: A string identifying the URL of a proxy that all HTTPS requests
should be routed through. Modifying this value will not affect any SOAP
service clients you've already created.
"""
# The key in the storage yaml which contains DFA data.
_YAML_KEY = 'dfa'
# A list of values which must be provided to use DFA.
_REQUIRED_INIT_VALUES = ('application_name', 'username')
# A list of values which may optionally be provided when using DFA.
_OPTIONAL_INIT_VALUES = ()
# The format of SOAP service WSDLs. A server, version, and service name need
# to be formatted in.
_SOAP_SERVICE_FORMAT = '%s/%s/api/dfa-api/%s?wsdl'
# A giant dictionary of DFA versions and the services they support.
_SERVICE_MAP = {
'v1.19': ('ad', 'advertiser', 'advertisergroup', 'campaign', 'changelog',
'contentcategory', 'creative', 'creativefield', 'creativegroup',
'login', 'network', 'placement', 'site', 'size', 'spotlight',
'strategy', 'subnetwork', 'user', 'userrole', 'report'),
'v1.20': ('ad', 'advertiser', 'advertisergroup', 'campaign', 'changelog',
'contentcategory', 'creative', 'creativefield', 'creativegroup',
'login', 'network', 'placement', 'site', 'size', 'spotlight',
'strategy', 'subnetwork', 'user', 'userrole', 'report'),
}
@classmethod
def LoadFromStorage(cls, path=None):
"""Creates an DfaClient with information stored in a yaml file.
Args:
[optional]
path: The path string to the file containing cached DFA data.
Returns:
A DfaClient initialized with the values cached in the file.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required key was missing or an OAuth 2.0 key was missing.
"""
if path is None:
path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')
return cls(**googleads.common.LoadFromStorage(
path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,
cls._OPTIONAL_INIT_VALUES))
def __init__(self, username, oauth2_client, application_name,
https_proxy=None, cache=None):
"""Initializes a DfaClient.
For more information on these arguments, see our SOAP headers guide:
https://developers.google.com/doubleclick-advertisers/docs/SOAP_headers
Args:
username: A string representation of your DFA username. This is likely not
the same as your Google Account name.
oauth2_client: A googleads.oauth2.GoogleOAuth2Client used to authorize
your requests.
application_name: An arbitrary string which will be used to identify your
application
[optional]
https_proxy: A string identifying the proxy that all HTTPS requests
should be routed through.
cache: A subclass of suds.cache.Cache; defaults to None.
"""
self.username = username
self.oauth2_client = oauth2_client
self.application_name = application_name
self.https_proxy = https_proxy
self.cache = cache
self._header_handler = _DfaHeaderHandler(self)
def GetService(self, service_name, version=sorted(_SERVICE_MAP.keys())[-1],
server='https://advertisersapi.doubleclick.com'):
"""Creates a service client for the given service.
Args:
service_name: A string identifying which DFA service to create a service
client for.
[optional]
version: A string identifying the DFA version to connect to. This defaults
to what is currently the latest version. This will be updated in
future releases to point to what is then the latest version.
server: A string identifying the webserver hosting the DFA API.
Returns:
A suds.client.ServiceSelector which has the headers and proxy configured
for use.
Raises:
A GoogleAdsValueError if the service or version provided do not exist.
"""
server = server[:-1] if server[-1] == '/' else server
try:
proxy_option = None
if self.https_proxy:
proxy_option = {
'https': self.https_proxy
}
client = suds.client.Client(
self._SOAP_SERVICE_FORMAT % (server, version, service_name),
proxy=proxy_option, cache=self.cache, timeout=3600)
except suds.transport.TransportError:
if version in self._SERVICE_MAP:
if service_name in self._SERVICE_MAP[version]:
raise
else:
raise googleads.errors.GoogleAdsValueError(
'Unrecognized service for the DFA API. Service given: %s '
'Supported services: %s'
% (service_name, self._SERVICE_MAP[version]))
else:
raise googleads.errors.GoogleAdsValueError(
'Unrecognized version of the DFA API. Version given: %s Supported '
'versions: %s' % (version, self._SERVICE_MAP.keys()))
return googleads.common.SudsServiceProxy(client, self._header_handler)
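  # Minimal usage sketch (assumes a cached googleads.yaml with a 'dfa' section;
  # service and version names must come from _SERVICE_MAP above):
  #   dfa_client = DfaClient.LoadFromStorage()
  #   campaign_service = dfa_client.GetService('campaign', version='v1.20')
  # Methods invoked on campaign_service are then proxied through
  # SudsServiceProxy, with the WSSE and RequestHeader SOAP headers attached by
  # the header handler below.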
class _DfaHeaderHandler(googleads.common.HeaderHandler):
"""Handler which sets the headers for a DFA SOAP call."""
# The library signature for DFA, to be appended to all application_names.
_LIB_SIG = googleads.common.GenerateLibSig('DfaApi-Python')
def __init__(self, dfa_client):
"""Initializes a DfaHeaderHandler.
Args:
dfa_client: The DfaClient whose data will be used to fill in the headers.
We retain a reference to this object so that the header handler picks
up changes to the client.
"""
self._dfa_client = dfa_client
def SetHeaders(self, suds_client):
"""Sets the SOAP and HTTP headers on the given suds client."""
wsse_header = suds.wsse.Security()
wsse_header.tokens.append(
suds.wsse.UsernameToken(self._dfa_client.username))
request_header = suds.sax.element.Element('RequestHeader')
request_header.append(
suds.sax.element.Element('applicationName').setText(
''.join([self._dfa_client.application_name, self._LIB_SIG])))
suds_client.set_options(
wsse=wsse_header, soapheaders=request_header,
headers=self._dfa_client.oauth2_client.CreateHttpHeader())
| apache-2.0 | -4,060,715,951,778,119,000 | 38.310526 | 80 | 0.676931 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_pickletools.py | 20 | 2428 | import struct
import pickle
import pickletools
from test import support
from test.pickletester import AbstractPickleTests
from test.pickletester import AbstractPickleModuleTests
class OptimizedPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
def dumps(self, arg, proto=None):
return pickletools.optimize(pickle.dumps(arg, proto))
def loads(self, buf, **kwds):
return pickle.loads(buf, **kwds)
# Test relies on precise output of dumps()
test_pickle_to_2x = None
def test_optimize_long_binget(self):
data = [str(i) for i in range(257)]
data.append(data[-1])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(data, proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, data)
self.assertIs(unpickled[-1], unpickled[-2])
pickled2 = pickletools.optimize(pickled)
unpickled2 = pickle.loads(pickled2)
self.assertEqual(unpickled2, data)
self.assertIs(unpickled2[-1], unpickled2[-2])
self.assertNotIn(pickle.LONG_BINGET, pickled2)
self.assertNotIn(pickle.LONG_BINPUT, pickled2)
def test_optimize_binput_and_memoize(self):
pickled = (b'\x80\x04\x95\x15\x00\x00\x00\x00\x00\x00\x00'
b']\x94(\x8c\x04spamq\x01\x8c\x03ham\x94h\x02e.')
# 0: \x80 PROTO 4
# 2: \x95 FRAME 21
# 11: ] EMPTY_LIST
# 12: \x94 MEMOIZE
# 13: ( MARK
# 14: \x8c SHORT_BINUNICODE 'spam'
# 20: q BINPUT 1
# 22: \x8c SHORT_BINUNICODE 'ham'
# 27: \x94 MEMOIZE
# 28: h BINGET 2
# 30: e APPENDS (MARK at 13)
# 31: . STOP
self.assertIn(pickle.BINPUT, pickled)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, ['spam', 'ham', 'ham'])
self.assertIs(unpickled[1], unpickled[2])
pickled2 = pickletools.optimize(pickled)
unpickled2 = pickle.loads(pickled2)
self.assertEqual(unpickled2, ['spam', 'ham', 'ham'])
self.assertIs(unpickled2[1], unpickled2[2])
self.assertNotIn(pickle.BINPUT, pickled2)
def test_main():
support.run_unittest(OptimizedPickleTests)
support.run_doctest(pickletools)
if __name__ == "__main__":
test_main()
| apache-2.0 | -8,702,992,211,242,311,000 | 34.705882 | 75 | 0.600906 | false |
ifduyue/sentry | src/sentry/api/bases/user.py | 3 | 1222 | from __future__ import absolute_import
from sentry.api.base import Endpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.permissions import ScopedPermission
from sentry.models import User
from sentry.auth.superuser import is_active_superuser
class UserPermission(ScopedPermission):
def has_object_permission(self, request, view, user=None):
if user is None:
user = request.user
if request.user == user:
return True
if request.auth:
return False
if is_active_superuser(request):
return True
return False
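    # In short: a user may always act on their own account; requests
    # authenticated with an auth token (request.auth) are rejected for other
    # users, and active superusers may act on any account.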
class UserEndpoint(Endpoint):
permission_classes = (UserPermission, )
def convert_args(self, request, user_id, *args, **kwargs):
try:
if user_id == 'me':
if not request.user.is_authenticated():
raise ResourceDoesNotExist
user_id = request.user.id
user = User.objects.get(
id=user_id,
)
except User.DoesNotExist:
raise ResourceDoesNotExist
self.check_object_permissions(request, user)
kwargs['user'] = user
return (args, kwargs)
| bsd-3-clause | -9,057,488,364,407,186,000 | 28.095238 | 62 | 0.620295 | false |
sih4sing5hong5/hue7jip8 | 匯入/management/commands/台語文語料庫蒐集及語料庫為本台語書面語音節詞頻統計.py | 1 | 4491 | from csv import DictReader
import io
from os import walk
from os.path import join, basename, dirname
from tempfile import TemporaryDirectory
from urllib.request import urlopen
from zipfile import ZipFile
from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
from 臺灣言語服務.models import 訓練過渡格式
from 匯入.指令 import 匯入枋模
from 臺灣言語工具.解析整理.解析錯誤 import 解析錯誤
class Command(匯入枋模):
help = 'http://ip194097.ntcu.edu.tw/giankiu/keoe/KKH/guliau-supin/guliau-supin.asp'
zip網址 = 'https://github.com/Taiwanese-Corpus/Ungian_2005_guliau-supin/archive/master.zip'
來源 = '台語文語料庫蒐集及語料庫為本台語書面語音節詞頻統計'
公家內容 = {
'年代': '2005',
'種類': '語句',
}
欄位表 = {
'luipiat': '類別',
'chokchia': '作者',
'piautoe': '標題',
'tongmia': '檔名',
'nitai': '年代',
'lamlu': '男女',
}
性別表 = {'b': '查某', 'p': '查甫', 'm': '毋知'}
def add_arguments(self, parser):
parser.add_argument(
'--錯誤印部份就好',
action='store_true',
help='因為CI有限制輸出4M',
)
def 全部資料(self, *args, **參數):
self.錯誤全印 = not 參數['錯誤印部份就好']
匯入數量 = 0
for 資料 in self._全部資料():
yield 訓練過渡格式(
**資料,
**self.公家內容
)
匯入數量 += 1
if 匯入數量 % 1000 == 0:
self.stdout.write('匯入 {} 筆'.format(匯入數量))
def _全部資料(self):
with TemporaryDirectory() as 資料夾:
with urlopen(self.zip網址) as 網路檔:
with io.BytesIO(網路檔.read()) as 檔:
with ZipFile(檔) as 資料:
資料.extractall(資料夾)
實際資料夾 = join(資料夾, 'Ungian_2005_guliau-supin-master/')
yield from self.轉規類(join(實際資料夾, '轉換後資料'), 'HL')
yield from self.轉規類(join(實際資料夾, '轉換後資料'), 'POJ')
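    # Flow sketch: the GitHub archive is downloaded into a temporary directory,
    # extracted, and both the 'HL' and 'POJ' conversions under 轉換後資料 are then
    # imported through 轉規類 above.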
def 轉規類(self, 語料資料夾, 類):
目錄 = {}
with open(join(語料資料夾, '{}.csv'.format(類))) as csvfile:
reader = DictReader(csvfile)
for row in reader:
資料 = {}
for 欄位, 內容 in row.items():
try:
資料[self.欄位表[欄位]] = 內容
except KeyError:
'Ki-thann na-ui ing be tioh.'
資料['男女'] = self.性別表[資料['男女']]
檔名 = 資料.pop('檔名')
目錄[檔名] = 資料
for 所在, _資料夾, 檔名陣列 in walk(join(語料資料夾, 類)):
for 檔名 in 檔名陣列:
with open(join(所在, 檔名)) as 檔:
try:
來源內容 = 目錄.pop(檔名[:-4])
作者 = 來源內容['作者']
類別 = 來源內容['類別']
except KeyError:
作者 = basename(所在)
類別 = basename(dirname(所在))
for 一逝 in 檔.readlines():
文本資料 = 一逝.strip()
if len(文本資料) > 0:
try:
台文 = 拆文分析器.建立句物件(文本資料).看分詞()
except 解析錯誤 as 錯誤:
if self.錯誤全印:
self.stderr.write(錯誤)
else:
self.stderr.write(str(錯誤)[:40])
else:
yield {
'來源': '{}-{}'.format(
self.來源, 類, 作者, 類別
),
'文本': 台文,
}
if len(目錄) > 0:
self.stderr.write(
'表有物件無對著!!目錄賰:'.format(目錄.keys()),
)
| mit | 2,468,378,922,569,310,000 | 31.495495 | 93 | 0.411422 | false |
pspacek/freeipa | ipatests/test_webui/test_automember.py | 5 | 8981 | # Authors:
# Petr Vobornik <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Automember tests
"""
from ipatests.test_webui.ui_driver import UI_driver
from ipatests.test_webui.ui_driver import screenshot
import ipatests.test_webui.data_hostgroup as hostgroup
from ipatests.test_webui.test_host import host_tasks
ENTITY = 'automember'
USER_GROUP_PKEY = 'admins'
USER_GROUP_DATA = {
'pkey': USER_GROUP_PKEY,
'add': [
('combobox', 'cn', USER_GROUP_PKEY),
],
'mod': [
('textarea', 'description', 'user group rule description'),
],
}
HOST_GROUP_DATA = {
'pkey': hostgroup.PKEY,
'add': [
('combobox', 'cn', hostgroup.PKEY),
],
'mod': [
('textarea', 'description', 'host group rule description'),
],
}
class test_automember(UI_driver):
@screenshot
def test_crud(self):
"""
Basic CRUD: automember
"""
self.init_app()
# user group rule
self.basic_crud(ENTITY, USER_GROUP_DATA,
search_facet='searchgroup',
default_facet='usergrouprule',
details_facet='usergrouprule',
)
# prepare host group
self.basic_crud(hostgroup.ENTITY, hostgroup.DATA,
default_facet=hostgroup.DEFAULT_FACET,
delete=False)
# host group rule
self.navigate_by_menu('identity/automember/amhostgroup')
self.basic_crud(ENTITY, HOST_GROUP_DATA,
search_facet='searchhostgroup',
default_facet='hostgrouprule',
details_facet='hostgrouprule',
navigate=False,
breadcrumb='Host group rules',
)
# cleanup
self.delete(hostgroup.ENTITY, [hostgroup.DATA])
@screenshot
def test_rebuild_membership_hosts(self):
"""
Test automember rebuild membership feature for hosts
"""
self.init_app()
host_util = host_tasks()
host_util.setup(self.driver, self.config)
domain = self.config.get('ipa_domain')
host1 = 'web1.%s' % domain
host2 = 'web2.%s' % domain
# Add a hostgroup
self.add_record('hostgroup', {
'pkey': 'webservers',
'add': [
('textbox', 'cn', 'webservers'),
('textarea', 'description', 'webservers'),
]
})
# Add hosts
        self.add_record('host', host_util.get_data("web1", domain))
        self.add_record('host', host_util.get_data("web2", domain))
# Add an automember rule
self.add_record(
'automember',
{'pkey': 'webservers', 'add': [('combobox', 'cn', 'webservers')]},
facet='searchhostgroup'
)
# Add a condition for automember rule
self.navigate_to_record('webservers')
self.add_table_record(
'automemberinclusiveregex',
{'fields': [
('selectbox', 'key', 'fqdn'),
('textbox', 'automemberinclusiveregex', '^web[1-9]+')
]}
)
# Assert that hosts are not members of hostgroup
self.navigate_to_record('webservers', entity='hostgroup')
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record(host1, negative=True)
self.assert_record(host2, negative=True)
# Rebuild membership for first host, using action on host details facet
self.navigate_to_record(host1, entity='host')
self.action_list_action('automember_rebuild')
# Assert that host is now a member of hostgroup
self.navigate_to_record('webservers', entity='hostgroup')
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record(host1)
self.assert_record(host2, negative=True)
# Remove host from hostgroup
self.delete_record(host1)
# Assert that host is not a member of hostgroup
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record(host1, negative=True)
self.assert_record(host2, negative=True)
# Rebuild membership for all hosts, using action on hosts search facet
self.navigate_by_menu('identity/host')
self.navigate_by_breadcrumb('Hosts')
self.action_list_action('automember_rebuild')
# Assert that hosts are now members of hostgroup
self.navigate_to_record('webservers', entity='hostgroup')
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record(host1)
self.assert_record(host2)
# Delete hostgroup, hosts and automember rule
self.delete('hostgroup', [{'pkey': 'webservers'}])
self.delete('host', [{'pkey': host1}, {'pkey': host2}])
self.delete('automember', [{'pkey': 'webservers'}],
facet='searchhostgroup')
@screenshot
def test_rebuild_membership_users(self):
"""
Test automember rebuild membership feature for users
"""
self.init_app()
# Add a group
self.add_record('group', {
'pkey': 'devel',
'add': [
('textbox', 'cn', 'devel'),
('textarea', 'description', 'devel'),
]
})
# Add a user
self.add_record('user', {
'pkey': 'dev1',
'add': [
('textbox', 'uid', 'dev1'),
('textbox', 'givenname', 'Dev'),
('textbox', 'sn', 'One'),
]
})
# Add another user
self.add_record('user', {
'pkey': 'dev2',
'add': [
('textbox', 'uid', 'dev2'),
('textbox', 'givenname', 'Dev'),
('textbox', 'sn', 'Two'),
]
})
# Add an automember rule
self.add_record(
'automember',
{'pkey': 'devel', 'add': [('combobox', 'cn', 'devel')]},
facet='searchgroup'
)
# Add a condition for automember rule
self.navigate_to_record('devel')
self.add_table_record(
'automemberinclusiveregex',
{'fields': [
('selectbox', 'key', 'uid'),
('textbox', 'automemberinclusiveregex', '^dev[1-9]+')
]}
)
# Assert that users are not members of group
self.navigate_to_record('devel', entity='group')
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record('dev1', negative=True)
self.assert_record('dev2', negative=True)
# Rebuild membership for first user, using action on user details facet
self.navigate_to_record('dev1', entity='user')
self.action_list_action('automember_rebuild')
# Assert that user is now a member of group
self.navigate_to_record('devel', entity='group')
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record('dev1')
self.assert_record('dev2', negative=True)
# Remove user from group
self.delete_record('dev1')
# Assert that user is not a member of group
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record('dev1', negative=True)
self.assert_record('dev2', negative=True)
# Rebuild membership for all users, using action on users search facet
self.navigate_by_menu('identity/user')
self.navigate_by_breadcrumb('Users')
self.action_list_action('automember_rebuild')
# Assert that users are now members of group
self.navigate_to_record('devel', entity='group')
self.facet_button_click('refresh')
self.wait_for_request()
self.assert_record('dev1')
self.assert_record('dev2')
# Delete group, users and automember rule
self.delete('group', [{'pkey': 'devel'}])
self.delete('user', [{'pkey': 'dev1'}, {'pkey': 'dev2'}])
self.delete('automember', [{'pkey': 'devel'}], facet='searchgroup')
| gpl-3.0 | 2,198,048,054,454,316,300 | 32.140221 | 79 | 0.566418 | false |
WinterNis/sqlalchemy | doc/build/conf.py | 17 | 12251 | # -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import traceback
def force_install_reqs():
import logging
log = logging.getLogger("pip")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("[pip] %(message)s"))
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info("READTHEDOCS is set, force-installing requirements.txt")
from pip.commands import install
req = os.path.join(os.path.dirname(__file__), "requirements.txt")
cmd = install.InstallCommand()
options, args = cmd.parse_args(["-v", "-U", "-r", req])
cmd.run(options, args)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../lib'))
sys.path.insert(0, os.path.abspath('../..')) # examples
sys.path.insert(0, os.path.abspath('.'))
import sqlalchemy
# attempt to force pip to definitely get the latest
# versions of libraries, see
# https://github.com/rtfd/readthedocs.org/issues/1293
rtd = os.environ.get('READTHEDOCS', None) == 'True'
if rtd:
try:
force_install_reqs()
except:
traceback.print_exc()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'zzzeeksphinx',
'changelog',
'sphinx_paramlinks',
#'corrections'
]
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath('templates')]
nitpicky = True
# The suffix of source filenames.
source_suffix = '.rst'
# section names used by the changelog extension.
changelog_sections = ["general", "orm", "orm declarative", "orm querying", \
"orm configuration", "engine", "sql", \
"schema", \
"postgresql", "mysql", "sqlite", "mssql", \
"oracle", "firebird"]
# tags to sort on inside of sections
changelog_inner_tag_sort = ["feature", "changed", "removed", "bug", "moved"]
# how to render changelog links
changelog_render_ticket = "http://www.sqlalchemy.org/trac/ticket/%s"
changelog_render_pullreq = {
"bitbucket": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
"default": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
"github": "https://github.com/zzzeek/sqlalchemy/pull/%s",
}
changelog_render_changeset = "http://www.sqlalchemy.org/trac/changeset/%s"
autodocmods_convert_modname = {
"sqlalchemy.sql.sqltypes": "sqlalchemy.types",
"sqlalchemy.sql.type_api": "sqlalchemy.types",
"sqlalchemy.sql.schema": "sqlalchemy.schema",
"sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
"sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
"sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
"sqlalchemy.sql.ddl": "sqlalchemy.schema",
"sqlalchemy.sql.base": "sqlalchemy.sql.expression",
"sqlalchemy.engine.base": "sqlalchemy.engine",
"sqlalchemy.engine.result": "sqlalchemy.engine",
}
autodocmods_convert_modname_w_class = {
("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'SQLAlchemy'
copyright = u'2007-2015, the SQLAlchemy authors and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.1"
# The full version, including alpha/beta/rc tags.
release = "1.1.0b1"
release_date = "not released"
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = 3
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# have the "gettext" build generate .pot for each individual
# .rst
gettext_compact = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'zzzeeksphinx'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%m/%d/%Y %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SQLAlchemydoc'
#autoclass_content = 'both'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'sqlalchemy_%s.tex' % release.replace('.', '_'), ur'SQLAlchemy Documentation',
ur'Mike Bayer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 2.
latex_preamble = '\setcounter{tocdepth}{3}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
#latex_elements = {
# 'papersize': 'letterpaper',
# 'pointsize': '10pt',
#}
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sqlalchemy', u'SQLAlchemy Documentation',
[u'SQLAlchemy authors'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'SQLAlchemy'
epub_author = u'SQLAlchemy authors'
epub_publisher = u'SQLAlchemy authors'
epub_copyright = u'2007-2015, SQLAlchemy authors'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
intersphinx_mapping = {
'alembic': ('http://alembic.readthedocs.org/en/latest/', None),
'psycopg2': ('http://pythonhosted.org/psycopg2', None),
}
| mit | -3,344,454,117,097,351,700 | 31.239474 | 93 | 0.69578 | false |
wolfgangmauerer/prosoda | prosoda/cluster/PersonInfo.py | 1 | 11196 | # This file is part of prosoda. prosoda is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2010, 2011 by Wolfgang Mauerer <[email protected]>
# Copyright 2012, 2013, Siemens AG, Wolfgang Mauerer <[email protected]>
# All Rights Reserved.
from logging import getLogger; log = getLogger(__name__)
from prosoda.commit_analysis import tag_types, active_tag_types, proximity_relation \
, file_relation, committer2author_relation, all_link_types
class PersonInfo:
""" Information about a commiter, and his relation to other commiters"""
def __init__(self, subsys_names = [], ID=None, name="", email=""):
self.ID = ID
self.name = name
self.email = email
self.subsys_names = subsys_names
# Store from which developers the person received a tag
self.associations = {}
for link_type in all_link_types:
self.associations[link_type] = {}
# See addSendRelation on the meaning of the following
self.inv_associations = {}
self.tagged_commits = {}
for link_type in all_link_types + ["author"]:
self.inv_associations[link_type] = {}
# See computeTagStats()
for tag in tag_types + ["author"]:
self.tagged_commits[tag] = []
self.tag_fraction = {}
self.subsys_fraction = {}
# List of all commits authored by this person (commit instances)
# The commit_stats is a hash with summary statistics, see
# computeCommitStats() for details
self.commit_list = []
self.commit_stats = None
# Which subsystems were touched in which role. The entry
# for each tag role is a hash with all subsystems as key, and the
# count how often the subsystem was touched in that role as value
self.subsys_touched = {}
# "author" is a special-purpose role that is not included in
# the generic tag role list.
for link_type in all_link_types + ["author"]:
self.subsys_touched[link_type] = {}
# General is used if the commit does not touch any well-defined
# subsystem(s), for instance when a generic header is modified.
for subsys in subsys_names + ["general"]:
self.subsys_touched[link_type][subsys] = 0
self.subsys_touched[link_type]["general"] = 0
        # Count how often the person has made a link to someone else (i.e., given a
        # signed-off, made a commit in close proximity, or committed someone else's code)
self.linksPerformed = 0
# Count how many tags (independent of tag category) have been
        # received from a specific ID.
self.all_tags_received_by_id = {}
# Count how many active tags (without "passive" categories like CC)
        # have been received from a specific ID.
self.active_tags_received_by_id = {}
#count how many links based on the proximity metric were received by
#a given ID
self.proximity_links_recieved_by_id = {}
#count how many links based on committer -> author were received by
#a given ID
self.committer_links_recieved_by_id = {}
#count how many links based on commits in the same file were received by
#a given ID
self.file_links_recieved_by_id = {}
def setID(self, ID):
self.ID = ID
def getID(self):
return self.ID
def setName(self, name):
self.name = name
def getName(self):
if self.name == "":
return self.email
return self.name
def setEmail(self, email):
self.email = email
def getEmail(self):
return self.email
def getCommitList(self):
return self.commit_list
def addCommit(self, cmt):
self.commit_list.append(cmt)
def _getLinksReceivedByID(self, link_hash, ID):
if ID in link_hash.keys():
return link_hash[ID]
else:
return 0
def getActiveTagsReceivedByID(self, ID):
return self._getLinksReceivedByID(self.active_tags_received_by_id, ID)
def getLinksReceivedByID(self, ID, link_type):
if link_type == proximity_relation:
return self._getLinksReceivedByID(self.proximity_links_recieved_by_id, ID)
elif link_type == committer2author_relation:
return self._getLinksReceivedByID(self.committer_links_recieved_by_id, ID)
elif link_type == file_relation:
return self._getLinksReceivedByID(self.file_links_recieved_by_id, ID)
def getAllTagsReceivedByID(self, ID):
        return self._getLinksReceivedByID(self.all_tags_received_by_id, ID)
def addRelation(self, relation_type, ID, assoc, weight=1):
"""State that the person has received or given a tag from/to ID.
The distinction between taking and giving is made in other
functions."""
if (ID in assoc[relation_type]):
assoc[relation_type][ID] += weight
else:
assoc[relation_type][ID] = weight
def addReceiveRelation(self, relation_type, ID, weight=1):
'''
add a one directional relation from the person identified by
ID and this person instance (ie. self)
eg. ID ----> self
the weight parameter specified the edge strength
'''
self.addRelation(relation_type, ID, self.associations, weight)
def addSendRelation(self, relation_type, ID, cmt, weight=1):
'''
add a one directional relation from the person instance
(ie. self) and the person identified by ID
eg. self ----> ID
the weight parameter specified the edge strength
'''
        self.addRelation(relation_type, ID, self.inv_associations, weight)
if relation_type in tag_types:
self.tagged_commits[relation_type].append(cmt.id)
self.linksPerformed +=1
self.addCmt2Subsys(cmt, relation_type)
def addCmt2Subsys(self, cmt, relation_type):
'''record which subsystem the commit was made to and what type of
link was performed (proximity, tag, committed)'''
cmt_subsys = cmt.getSubsystemsTouched()
for subsys in cmt_subsys:
self.subsys_touched[relation_type][subsys] += cmt_subsys[subsys]
def getPerformTagRelations(self, relation_type):
return self.inv_associations[relation_type]
def getSubsysFraction(self):
return self.subsys_fraction
# Helper for computeTagStats, see below
def _sum_relations(self, relation_type, rcv_by_id_hash):
for ID in self.associations[relation_type]:
if ID in rcv_by_id_hash:
rcv_by_id_hash[ID] += self.associations[relation_type][ID]
else:
rcv_by_id_hash[ID] = self.associations[relation_type][ID]
def computeStats(self, link_type):
#computer tag specific stats
if link_type == "Tag":
self.computeTagStats()
#determine fraction of relation types
#for each subsystem
self.computeSubsysFraction()
#sum over the relation types
self.computeRelationSums()
def computeTagStats(self):
"""Compute statistics inferred from the tag information.
While this can be called anytime, it makes sense to call the function
only once all link data have been collected."""
if self.linksPerformed == 0:
log.warning("{0} did not perform any links?!".
format(self.getName()))
return
# print("{0} performed {1} tags".format(self.getName(),
# self.tagsPerformed))
# Per-author distribution of tags (e..g, 30% Signed-Off, 60%
# CC, 10% Acked-By)
for tag in tag_types + ["author"]:
self.tag_fraction[tag] = \
len(self.tagged_commits[tag])/float(self.linksPerformed)
def computeSubsysFraction(self):
total_links = 0
# Summarise over all different link variants
for subsys in self.subsys_names + ["general"]:
self.subsys_fraction[subsys] = 0
for link_type in all_link_types + ["author"]:
self.subsys_fraction[subsys] += \
self.subsys_touched[link_type][subsys]
total_links += self.subsys_fraction[subsys]
# ... and normalise accordingly
if (total_links != 0):
for subsys in self.subsys_names + ["general"]:
self.subsys_fraction[subsys] /= float(total_links)
def computeRelationSums(self):
# Summarise the links given _to_ (i.e, received by) the developer
# from a specific ID
for tag in tag_types:
self._sum_relations(tag, self.all_tags_received_by_id)
# Active tags do not include things like CC, which can
# be issued without the second party's consent
for tag in active_tag_types:
self._sum_relations(tag, self.active_tags_received_by_id)
#sum other possible link types
self._sum_relations(proximity_relation, self.proximity_links_recieved_by_id)
self._sum_relations(committer2author_relation, self.committer_links_recieved_by_id)
self._sum_relations(file_relation, self.file_links_recieved_by_id)
def getTagStats(self):
return self.tag_fraction
def getSubsysStats(self):
return self.subsys_touched
def getSubsysDistribution(self):
return self.subsys_fraction
def computeCommitStats(self):
"""Infer statistical information from per-author commits.
While this function can be called at any time, it naturally
makes sense to only use it once all commits have been associated
with the author"""
self.commit_stats = {}
self.commit_stats["added"] = 0
self.commit_stats["deleted"] = 0
self.commit_stats["numcommits"] = len(self.commit_list)
# NOTE: We use only a single difftype although the information
# from multiple is available
for cmt in self.commit_list:
self.commit_stats["added"] += cmt.getAddedLines(0)
self.commit_stats["deleted"] += cmt.getDeletedLines(0)
# TODO: There are many other summary statistics that we could
# compute, but which ones make sense?
def getCommitStats(self):
return self.commit_stats
############################ Test cases #########################
if __name__ == "__main__":
    personInfo = PersonInfo(name="sepp")
# TODO: Implement a couple of test cases
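    # Minimal smoke-test sketch (not part of the original source): exercises
    # the send/receive bookkeeping with a stub standing in for a real commit
    # object; only the attributes used by addSendRelation() are provided.
    class _StubCommit(object):
        id = "deadbeef"
        def getSubsystemsTouched(self):
            return {"general": 1}
    alice = PersonInfo(ID=1, name="alice")
    bob = PersonInfo(ID=2, name="bob")
    alice.addSendRelation(proximity_relation, bob.getID(), _StubCommit())
    bob.addReceiveRelation(proximity_relation, alice.getID())
    alice.computeStats(proximity_relation)
    bob.computeStats(proximity_relation)
    print(alice.getSubsysDistribution())
    print(bob.getLinksReceivedByID(alice.getID(), proximity_relation))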
| gpl-2.0 | -2,712,697,897,476,194,300 | 36.196013 | 91 | 0.628707 | false |
SanketDG/networkx | networkx/algorithms/flow/tests/test_mincost.py | 48 | 18133 | # -*- coding: utf-8 -*-
import networkx as nx
from nose.tools import assert_equal, assert_raises
import os
class TestMinCostFlow:
def test_simple_digraph(self):
G = nx.DiGraph()
G.add_node('a', demand = -5)
G.add_node('d', demand = 5)
G.add_edge('a', 'b', weight = 3, capacity = 4)
G.add_edge('a', 'c', weight = 6, capacity = 10)
G.add_edge('b', 'd', weight = 1, capacity = 9)
G.add_edge('c', 'd', weight = 2, capacity = 5)
flowCost, H = nx.network_simplex(G)
soln = {'a': {'b': 4, 'c': 1},
'b': {'d': 4},
'c': {'d': 1},
'd': {}}
assert_equal(flowCost, 24)
assert_equal(nx.min_cost_flow_cost(G), 24)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 24)
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 24)
assert_equal(nx.cost_of_flow(G, H), 24)
assert_equal(H, soln)
def test_negcycle_infcap(self):
G = nx.DiGraph()
G.add_node('s', demand = -5)
G.add_node('t', demand = 5)
G.add_edge('s', 'a', weight = 1, capacity = 3)
G.add_edge('a', 'b', weight = 3)
G.add_edge('c', 'a', weight = -6)
G.add_edge('b', 'd', weight = 1)
G.add_edge('d', 'c', weight = -2)
G.add_edge('d', 't', weight = 1, capacity = 3)
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
assert_raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
def test_sum_demands_not_zero(self):
G = nx.DiGraph()
G.add_node('s', demand = -5)
G.add_node('t', demand = 4)
G.add_edge('s', 'a', weight = 1, capacity = 3)
G.add_edge('a', 'b', weight = 3)
G.add_edge('a', 'c', weight = -6)
G.add_edge('b', 'd', weight = 1)
G.add_edge('c', 'd', weight = -2)
G.add_edge('d', 't', weight = 1, capacity = 3)
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
assert_raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
def test_no_flow_satisfying_demands(self):
G = nx.DiGraph()
G.add_node('s', demand = -5)
G.add_node('t', demand = 5)
G.add_edge('s', 'a', weight = 1, capacity = 3)
G.add_edge('a', 'b', weight = 3)
G.add_edge('a', 'c', weight = -6)
G.add_edge('b', 'd', weight = 1)
G.add_edge('c', 'd', weight = -2)
G.add_edge('d', 't', weight = 1, capacity = 3)
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
assert_raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
def test_transshipment(self):
G = nx.DiGraph()
G.add_node('a', demand = 1)
G.add_node('b', demand = -2)
G.add_node('c', demand = -2)
G.add_node('d', demand = 3)
G.add_node('e', demand = -4)
G.add_node('f', demand = -4)
G.add_node('g', demand = 3)
G.add_node('h', demand = 2)
G.add_node('r', demand = 3)
G.add_edge('a', 'c', weight = 3)
G.add_edge('r', 'a', weight = 2)
G.add_edge('b', 'a', weight = 9)
G.add_edge('r', 'c', weight = 0)
G.add_edge('b', 'r', weight = -6)
G.add_edge('c', 'd', weight = 5)
G.add_edge('e', 'r', weight = 4)
G.add_edge('e', 'f', weight = 3)
G.add_edge('h', 'b', weight = 4)
G.add_edge('f', 'd', weight = 7)
G.add_edge('f', 'h', weight = 12)
G.add_edge('g', 'd', weight = 12)
G.add_edge('f', 'g', weight = -1)
G.add_edge('h', 'g', weight = -10)
flowCost, H = nx.network_simplex(G)
soln = {'a': {'c': 0},
'b': {'a': 0, 'r': 2},
'c': {'d': 3},
'd': {},
'e': {'r': 3, 'f': 1},
'f': {'d': 0, 'g': 3, 'h': 2},
'g': {'d': 0},
'h': {'b': 0, 'g': 0},
'r': {'a': 1, 'c': 1}}
assert_equal(flowCost, 41)
assert_equal(nx.min_cost_flow_cost(G), 41)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 41)
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 41)
assert_equal(nx.cost_of_flow(G, H), 41)
assert_equal(H, soln)
def test_max_flow_min_cost(self):
G = nx.DiGraph()
G.add_edge('s', 'a', bandwidth = 6)
G.add_edge('s', 'c', bandwidth = 10, cost = 10)
G.add_edge('a', 'b', cost = 6)
G.add_edge('b', 'd', bandwidth = 8, cost = 7)
G.add_edge('c', 'd', cost = 10)
G.add_edge('d', 't', bandwidth = 5, cost = 5)
soln = {'s': {'a': 5, 'c': 0},
'a': {'b': 5},
'b': {'d': 5},
'c': {'d': 0},
'd': {'t': 5},
't': {}}
flow = nx.max_flow_min_cost(G, 's', 't', capacity = 'bandwidth',
weight = 'cost')
assert_equal(flow, soln)
assert_equal(nx.cost_of_flow(G, flow, weight = 'cost'), 90)
G.add_edge('t', 's', cost = -100)
flowCost, flow = nx.capacity_scaling(G, capacity = 'bandwidth',
weight = 'cost')
G.remove_edge('t', 's')
assert_equal(flowCost, -410)
assert_equal(flow['t']['s'], 5)
del flow['t']['s']
assert_equal(flow, soln)
assert_equal(nx.cost_of_flow(G, flow, weight = 'cost'), 90)
def test_digraph1(self):
# From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied
# Mathematical Programming. Addison-Wesley, 1977.
G = nx.DiGraph()
G.add_node(1, demand = -20)
G.add_node(4, demand = 5)
G.add_node(5, demand = 15)
G.add_edges_from([(1, 2, {'capacity': 15, 'weight': 4}),
(1, 3, {'capacity': 8, 'weight': 4}),
(2, 3, {'weight': 2}),
(2, 4, {'capacity': 4, 'weight': 2}),
(2, 5, {'capacity': 10, 'weight': 6}),
(3, 4, {'capacity': 15, 'weight': 1}),
(3, 5, {'capacity': 5, 'weight': 3}),
(4, 5, {'weight': 2}),
(5, 3, {'capacity': 4, 'weight': 1})])
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 12, 3: 8},
2: {3: 8, 4: 4, 5: 0},
3: {4: 11, 5: 5},
4: {5: 10},
5: {3: 0}}
assert_equal(flowCost, 150)
assert_equal(nx.min_cost_flow_cost(G), 150)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 150)
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 150)
assert_equal(H, soln)
assert_equal(nx.cost_of_flow(G, H), 150)
def test_digraph2(self):
# Example from ticket #430 from mfrasca. Original source:
# http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11.
G = nx.DiGraph()
G.add_edge('s', 1, capacity=12)
G.add_edge('s', 2, capacity=6)
G.add_edge('s', 3, capacity=14)
G.add_edge(1, 2, capacity=11, weight=4)
G.add_edge(2, 3, capacity=9, weight=6)
G.add_edge(1, 4, capacity=5, weight=5)
G.add_edge(1, 5, capacity=2, weight=12)
G.add_edge(2, 5, capacity=4, weight=4)
G.add_edge(2, 6, capacity=2, weight=6)
G.add_edge(3, 6, capacity=31, weight=3)
G.add_edge(4, 5, capacity=18, weight=4)
G.add_edge(5, 6, capacity=9, weight=5)
G.add_edge(4, 't', capacity=3)
G.add_edge(5, 't', capacity=7)
G.add_edge(6, 't', capacity=22)
flow = nx.max_flow_min_cost(G, 's', 't')
soln = {1: {2: 6, 4: 5, 5: 1},
2: {3: 6, 5: 4, 6: 2},
3: {6: 20},
4: {5: 2, 't': 3},
5: {6: 0, 't': 7},
6: {'t': 22},
's': {1: 12, 2: 6, 3: 14},
't': {}}
assert_equal(flow, soln)
G.add_edge('t', 's', weight=-100)
flowCost, flow = nx.capacity_scaling(G)
G.remove_edge('t', 's')
assert_equal(flow['t']['s'], 32)
assert_equal(flowCost, -3007)
del flow['t']['s']
assert_equal(flow, soln)
assert_equal(nx.cost_of_flow(G, flow), 193)
def test_digraph3(self):
"""Combinatorial Optimization: Algorithms and Complexity,
Papadimitriou Steiglitz at page 140 has an example, 7.1, but that
admits multiple solutions, so I alter it a bit. From ticket #430
by mfrasca."""
G = nx.DiGraph()
G.add_edge('s', 'a', {0: 2, 1: 4})
G.add_edge('s', 'b', {0: 2, 1: 1})
G.add_edge('a', 'b', {0: 5, 1: 2})
G.add_edge('a', 't', {0: 1, 1: 5})
G.add_edge('b', 'a', {0: 1, 1: 3})
G.add_edge('b', 't', {0: 3, 1: 2})
"PS.ex.7.1: testing main function"
sol = nx.max_flow_min_cost(G, 's', 't', capacity=0, weight=1)
flow = sum(v for v in sol['s'].values())
assert_equal(4, flow)
assert_equal(23, nx.cost_of_flow(G, sol, weight=1))
assert_equal(sol['s'], {'a': 2, 'b': 2})
assert_equal(sol['a'], {'b': 1, 't': 1})
assert_equal(sol['b'], {'a': 0, 't': 3})
assert_equal(sol['t'], {})
G.add_edge('t', 's', {1: -100})
flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1)
G.remove_edge('t', 's')
flow = sum(v for v in sol['s'].values())
assert_equal(4, flow)
assert_equal(sol['t']['s'], 4)
assert_equal(flowCost, -377)
del sol['t']['s']
assert_equal(sol['s'], {'a': 2, 'b': 2})
assert_equal(sol['a'], {'b': 1, 't': 1})
assert_equal(sol['b'], {'a': 0, 't': 3})
assert_equal(sol['t'], {})
assert_equal(nx.cost_of_flow(G, sol, weight=1), 23)
def test_zero_capacity_edges(self):
"""Address issue raised in ticket #617 by arv."""
G = nx.DiGraph()
G.add_edges_from([(1, 2, {'capacity': 1, 'weight': 1}),
(1, 5, {'capacity': 1, 'weight': 1}),
(2, 3, {'capacity': 0, 'weight': 1}),
(2, 5, {'capacity': 1, 'weight': 1}),
(5, 3, {'capacity': 2, 'weight': 1}),
(5, 4, {'capacity': 0, 'weight': 1}),
(3, 4, {'capacity': 2, 'weight': 1})])
G.node[1]['demand'] = -1
G.node[2]['demand'] = -1
G.node[4]['demand'] = 2
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 0, 5: 1},
2: {3: 0, 5: 1},
3: {4: 2},
4: {},
5: {3: 2, 4: 0}}
assert_equal(flowCost, 6)
assert_equal(nx.min_cost_flow_cost(G), 6)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 6)
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 6)
assert_equal(H, soln)
assert_equal(nx.cost_of_flow(G, H), 6)
def test_digon(self):
"""Check if digons are handled properly. Taken from ticket
#618 by arv."""
nodes = [(1, {}),
(2, {'demand': -4}),
(3, {'demand': 4}),
]
edges = [(1, 2, {'capacity': 3, 'weight': 600000}),
(2, 1, {'capacity': 2, 'weight': 0}),
(2, 3, {'capacity': 5, 'weight': 714285}),
(3, 2, {'capacity': 2, 'weight': 0}),
]
G = nx.DiGraph(edges)
G.add_nodes_from(nodes)
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 0},
2: {1: 0, 3: 4},
3: {2: 0}}
assert_equal(flowCost, 2857140)
assert_equal(nx.min_cost_flow_cost(G), 2857140)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 2857140)
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 2857140)
assert_equal(H, soln)
assert_equal(nx.cost_of_flow(G, H), 2857140)
def test_infinite_capacity_neg_digon(self):
"""An infinite capacity negative cost digon results in an unbounded
instance."""
nodes = [(1, {}),
(2, {'demand': -4}),
(3, {'demand': 4}),
]
edges = [(1, 2, {'weight': -600}),
(2, 1, {'weight': 0}),
(2, 3, {'capacity': 5, 'weight': 714285}),
(3, 2, {'capacity': 2, 'weight': 0}),
]
G = nx.DiGraph(edges)
G.add_nodes_from(nodes)
assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G)
assert_raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
def test_finite_capacity_neg_digon(self):
"""The digon should receive the maximum amount of flow it can handle.
Taken from ticket #749 by @chuongdo."""
G = nx.DiGraph()
G.add_edge('a', 'b', capacity=1, weight=-1)
G.add_edge('b', 'a', capacity=1, weight=-1)
min_cost = -2
assert_equal(nx.min_cost_flow_cost(G), min_cost)
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, -2)
assert_equal(H, {'a': {'b': 1}, 'b': {'a': 1}})
assert_equal(nx.cost_of_flow(G, H), -2)
def test_multidigraph(self):
"""Multidigraphs are acceptable."""
G = nx.MultiDiGraph()
G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight='capacity')
flowCost, H = nx.network_simplex(G)
assert_equal(flowCost, 0)
assert_equal(H, {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}})
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 0)
assert_equal(H, {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}})
def test_negative_selfloops(self):
"""Negative selfloops should cause an exception if uncapacitated and
always be saturated otherwise.
"""
G = nx.DiGraph()
G.add_edge(1, 1, weight=-1)
assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G)
assert_raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
G[1][1]['capacity'] = 2
flowCost, H = nx.network_simplex(G)
assert_equal(flowCost, -2)
assert_equal(H, {1: {1: 2}})
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, -2)
assert_equal(H, {1: {1: 2}})
G = nx.MultiDiGraph()
G.add_edge(1, 1, 'x', weight=-1)
G.add_edge(1, 1, 'y', weight=1)
assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G)
assert_raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
G[1][1]['x']['capacity'] = 2
flowCost, H = nx.network_simplex(G)
assert_equal(flowCost, -2)
assert_equal(H, {1: {1: {'x': 2, 'y': 0}}})
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, -2)
assert_equal(H, {1: {1: {'x': 2, 'y': 0}}})
def test_bone_shaped(self):
# From #1283
G = nx.DiGraph()
G.add_node(0, demand=-4)
G.add_node(1, demand=2)
G.add_node(2, demand=2)
G.add_node(3, demand=4)
G.add_node(4, demand=-2)
G.add_node(5, demand=-2)
G.add_edge(0, 1, capacity=4)
G.add_edge(0, 2, capacity=4)
G.add_edge(4, 3, capacity=4)
G.add_edge(5, 3, capacity=4)
G.add_edge(0, 3, capacity=0)
flowCost, H = nx.network_simplex(G)
assert_equal(flowCost, 0)
assert_equal(
H, {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}})
flowCost, H = nx.capacity_scaling(G)
assert_equal(flowCost, 0)
assert_equal(
H, {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}})
def test_exceptions(self):
G = nx.Graph()
assert_raises(nx.NetworkXNotImplemented, nx.network_simplex, G)
assert_raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G)
G = nx.MultiGraph()
assert_raises(nx.NetworkXNotImplemented, nx.network_simplex, G)
assert_raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G)
G = nx.DiGraph()
assert_raises(nx.NetworkXError, nx.network_simplex, G)
assert_raises(nx.NetworkXError, nx.capacity_scaling, G)
G.add_node(0, demand=float('inf'))
assert_raises(nx.NetworkXError, nx.network_simplex, G)
assert_raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
G.node[0]['demand'] = 0
G.add_node(1, demand=0)
G.add_edge(0, 1, weight=-float('inf'))
assert_raises(nx.NetworkXError, nx.network_simplex, G)
assert_raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
G[0][1]['weight'] = 0
G.add_edge(0, 0, weight=float('inf'))
assert_raises(nx.NetworkXError, nx.network_simplex, G)
#assert_raises(nx.NetworkXError, nx.capacity_scaling, G)
G[0][0]['weight'] = 0
G[0][1]['capacity'] = -1
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
#assert_raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
G[0][1]['capacity'] = 0
G[0][0]['capacity'] = -1
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
#assert_raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
def test_large(self):
fname = os.path.join(os.path.dirname(__file__), 'netgen-2.gpickle.bz2')
G = nx.read_gpickle(fname)
flowCost, flowDict = nx.network_simplex(G)
assert_equal(6749969302, flowCost)
assert_equal(6749969302, nx.cost_of_flow(G, flowDict))
flowCost, flowDict = nx.capacity_scaling(G)
assert_equal(6749969302, flowCost)
assert_equal(6749969302, nx.cost_of_flow(G, flowDict))
| bsd-3-clause | 5,892,096,361,716,632,000 | 39.295556 | 101 | 0.489108 | false |
goodwinnk/intellij-community | python/helpers/pydev/_pydev_imps/_pydev_sys_patch.py | 28 | 2241 |
import sys
def patch_sys_module():
def patched_exc_info(fun):
def pydev_debugger_exc_info():
type, value, traceback = fun()
if type == ImportError:
#we should not show frame added by plugin_import call
if traceback and hasattr(traceback, "tb_next"):
return type, value, traceback.tb_next
return type, value, traceback
return pydev_debugger_exc_info
system_exc_info = sys.exc_info
sys.exc_info = patched_exc_info(system_exc_info)
if not hasattr(sys, "system_exc_info"):
sys.system_exc_info = system_exc_info
def patched_reload(orig_reload):
def pydev_debugger_reload(module):
orig_reload(module)
if module.__name__ == "sys":
# if sys module was reloaded we should patch it again
patch_sys_module()
return pydev_debugger_reload
def patch_reload():
if sys.version_info[0] >= 3:
import builtins # Py3
else:
import __builtin__ as builtins
if hasattr(builtins, "reload"):
sys.builtin_orig_reload = builtins.reload
builtins.reload = patched_reload(sys.builtin_orig_reload) # @UndefinedVariable
try:
import imp
sys.imp_orig_reload = imp.reload
imp.reload = patched_reload(sys.imp_orig_reload) # @UndefinedVariable
except:
pass
else:
try:
import importlib
sys.importlib_orig_reload = importlib.reload # @UndefinedVariable
importlib.reload = patched_reload(sys.importlib_orig_reload) # @UndefinedVariable
except:
pass
del builtins
def cancel_patches_in_sys_module():
sys.exc_info = sys.system_exc_info # @UndefinedVariable
if sys.version_info[0] >= 3:
import builtins # Py3
else:
import __builtin__ as builtins
if hasattr(sys, "builtin_orig_reload"):
builtins.reload = sys.builtin_orig_reload
if hasattr(sys, "imp_orig_reload"):
import imp
imp.reload = sys.imp_orig_reload
if hasattr(sys, "importlib_orig_reload"):
import importlib
importlib.reload = sys.importlib_orig_reload
del builtins
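# Usage sketch (assumption, not part of the original module): the debugger is
# expected to install the patches once at startup and undo them on detach.
#
#   patch_sys_module()
#   patch_reload()
#   ...  # run the debugged program
#   cancel_patches_in_sys_module()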
| apache-2.0 | 4,061,540,324,933,224,400 | 28.88 | 94 | 0.606872 | false |
googleads/googleads-python-lib | examples/ad_manager/v202105/live_stream_event_service/update_live_stream_events.py | 1 | 2600 | #!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates live stream events.
To determine which live stream events exist, run get_all_live_stream_events.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set ID of the LiveStreamEvent to get live stream events from.
LIVE_STREAM_EVENT_ID = 'INSERT_LIVE_STREAM_EVENT_ID_HERE'
def main(client, live_stream_event_id):
# Initialize appropriate services.
live_stream_events_service = client.GetService(
'LiveStreamEventService', version='v202105')
# Create statement object to only select matching live stream event.
statement = (ad_manager.StatementBuilder(version='v202105')
.Where(('Id = :id'))
.WithBindVariable('id', int(live_stream_event_id))
.Limit(500))
# Get live stream events by statement.
response = live_stream_events_service.getLiveStreamEventsByStatement(
statement.ToStatement())
# Set adTags to be updated.
new_ad_tags = ['INSERT_NEW_AD_TAGS_HERE']
if 'results' in response and len(response['results']):
# Update each local live stream event by changing its attributes.
updated_live_stream_events = []
for live_stream_event in response['results']:
live_stream_event['startDateTimeType'] = 'IMMEDIATELY'
live_stream_event['adTags'] = new_ad_tags
updated_live_stream_events.append(live_stream_event)
# Update live stream events.
live_stream_events = live_stream_events_service.updateLiveStreamEvents(
updated_live_stream_events)
# Display results.
for live_stream_event in live_stream_events:
print('Live stream event with id "%s", named "%s" and status %s was '
'updated.' % (live_stream_event['id'], live_stream_event['name'],
live_stream_event['status']))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, LIVE_STREAM_EVENT_ID)
| apache-2.0 | 1,451,714,767,980,867,300 | 36.681159 | 79 | 0.71 | false |
wolet/deepy | deepy/trainers/cores/rprop.py | 8 | 1040 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import theano.tensor as T
import theano
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99, rprop_min_step=0, rprop_max_step=100,
learning_rate=0.01):
"""
Rprop optimizer.
See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
"""
for param, grad in zip(params, gradients):
grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name+ '_step')
test = grad * grad_tm1
same = T.gt(test, 0)
diff = T.lt(test, 0)
step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
T.eq(test, 0) +
same * rprop_increase +
diff * rprop_decrease)))
grad = grad - diff * grad
yield param, param - T.sgn(grad) * step
yield grad_tm1, grad
yield step_tm1, step
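# Usage sketch (assumption, not from the original source): rprop_core yields
# (shared_variable, new_value) pairs that can be handed to Theano directly as
# the updates of a training function.
#
#   x = T.matrix('x')
#   w = theano.shared(np.zeros((5, 3), dtype=theano.config.floatX), name='w')
#   cost = T.sum(T.dot(x, w) ** 2)
#   grads = T.grad(cost, [w])
#   train = theano.function([x], cost, updates=list(rprop_core([w], grads)))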
| mit | -5,815,403,678,163,274,000 | 36.142857 | 113 | 0.599038 | false |
chaoli314/openbn | classes/Variable.py | 1 | 1115 | # This Python file uses the following encoding: utf-8
""" Subject line.
Main text.
"""
from functools import total_ordering
__author__ = 'Chao Li'
@total_ordering
class Variable(object):
def __init__(self, variable_index: int, variable_name, values: list):
self._variable_index = variable_index
self._variable_name = variable_name
self._values = values
self._cardinality = len(values)
@property
def index(self):
return self._variable_index
@property
def name(self):
return self._variable_name
@property
def card(self):
return self._cardinality
@property
def values(self):
return self._values
def get_value_name(self, index):
return self.values[index]
def get_value_index(self, value):
return self.values.index(value)
def __hash__(self) -> int:
return hash(self.index)
def __lt__(self, other):
return self.index < other.index
def __eq__(self, other):
return self.index == other.index
def __repr__(self) -> str:
return self.name
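# Example (hypothetical values, not part of the original source):
#
#   weather = Variable(0, "Weather", ["sunny", "cloudy", "rainy"])
#   weather.card                      # 3
#   weather.get_value_name(2)         # 'rainy'
#   weather.get_value_index("sunny")  # 0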
| apache-2.0 | 4,882,317,098,621,766,000 | 20.037736 | 73 | 0.609865 | false |
SDX2000/scons | engine/SCons/Tool/tlib.py | 21 | 1890 | """SCons.Tool.tlib
Tool-specific initialization for the Borland 'tlib' library archiver.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tlib.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Tool
import SCons.Tool.bcc32
import SCons.Util
def generate(env):
SCons.Tool.bcc32.findIt('tlib', env)
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
env['AR'] = 'tlib'
env['ARFLAGS'] = SCons.Util.CLVar('')
env['ARCOM'] = '$AR $TARGET $ARFLAGS /a $SOURCES'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
def exists(env):
return SCons.Tool.bcc32.findIt('tlib', env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 5,499,538,396,082,308,000 | 34.660377 | 101 | 0.721693 | false |
meabsence/python-for-android | python-modules/twisted/twisted/web/_auth/wrapper.py | 49 | 8174 | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2008-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A guard implementation which supports HTTP header-based authentication
schemes.
If no I{Authorization} header is supplied, an anonymous login will be
attempted by using a L{Anonymous} credentials object. If such a header is
supplied and does not contain allowed credentials, or if anonymous login is
denied, a 401 will be sent in the response along with I{WWW-Authenticate}
headers for each of the allowed authentication schemes.
"""
from zope.interface import implements
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.web.resource import IResource, ErrorPage
from twisted.web import util
from twisted.cred import error
from twisted.cred.credentials import Anonymous
class UnauthorizedResource(object):
"""
Simple IResource to escape Resource dispatch
"""
implements(IResource)
isLeaf = True
def __init__(self, factories):
self._credentialFactories = factories
def render(self, request):
"""
Send www-authenticate headers to the client
"""
def generateWWWAuthenticate(scheme, challenge):
l = []
for k,v in challenge.iteritems():
l.append("%s=%s" % (k, quoteString(v)))
return "%s %s" % (scheme, ", ".join(l))
def quoteString(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
request.setResponseCode(401)
for fact in self._credentialFactories:
challenge = fact.getChallenge(request)
request.responseHeaders.addRawHeader(
'www-authenticate',
generateWWWAuthenticate(fact.scheme, challenge))
return 'Unauthorized'
def getChildWithDefault(self, path, request):
"""
Disable resource dispatch
"""
return self
class HTTPAuthSessionWrapper(object):
"""
Wrap a portal, enforcing supported header-based authentication schemes.
@ivar _portal: The L{Portal} which will be used to retrieve L{IResource}
avatars.
@ivar _credentialFactories: A list of L{ICredentialFactory} providers which
will be used to decode I{Authorization} headers into L{ICredentials}
providers.
"""
implements(IResource)
isLeaf = False
def __init__(self, portal, credentialFactories):
"""
Initialize a session wrapper
@type portal: C{Portal}
@param portal: The portal that will authenticate the remote client
@type credentialFactories: C{Iterable}
        @param credentialFactories: An iterable of L{ICredentialFactory}
            providers which will be used to decode I{Authorization} headers
            from the remote client
"""
self._portal = portal
self._credentialFactories = credentialFactories
def _authorizedResource(self, request):
"""
Get the L{IResource} which the given request is authorized to receive.
If the proper authorization headers are present, the resource will be
requested from the portal. If not, an anonymous login attempt will be
made.
"""
authheader = request.getHeader('authorization')
if not authheader:
return util.DeferredResource(self._login(Anonymous()))
factory, respString = self._selectParseHeader(authheader)
if factory is None:
return UnauthorizedResource(self._credentialFactories)
try:
credentials = factory.decode(respString, request)
except error.LoginFailed:
return UnauthorizedResource(self._credentialFactories)
except:
log.err(None, "Unexpected failure from credentials factory")
return ErrorPage(500, None, None)
else:
return util.DeferredResource(self._login(credentials))
def render(self, request):
"""
Find the L{IResource} avatar suitable for the given request, if
possible, and render it. Otherwise, perhaps render an error page
requiring authorization or describing an internal server failure.
"""
return self._authorizedResource(request).render(request)
def getChildWithDefault(self, path, request):
"""
Inspect the Authorization HTTP header, and return a deferred which,
when fired after successful authentication, will return an authorized
C{Avatar}. On authentication failure, an C{UnauthorizedResource} will
be returned, essentially halting further dispatch on the wrapped
resource and all children
"""
# Don't consume any segments of the request - this class should be
# transparent!
request.postpath.insert(0, request.prepath.pop())
return self._authorizedResource(request)
def _login(self, credentials):
"""
Get the L{IResource} avatar for the given credentials.
@return: A L{Deferred} which will be called back with an L{IResource}
avatar or which will errback if authentication fails.
"""
d = self._portal.login(credentials, None, IResource)
d.addCallbacks(self._loginSucceeded, self._loginFailed)
return d
def _loginSucceeded(self, (interface, avatar, logout)):
"""
Handle login success by wrapping the resulting L{IResource} avatar
so that the C{logout} callback will be invoked when rendering is
complete.
"""
class ResourceWrapper(proxyForInterface(IResource, 'resource')):
"""
Wrap an L{IResource} so that whenever it or a child of it
completes rendering, the cred logout hook will be invoked.
An assumption is made here that exactly one L{IResource} from
among C{avatar} and all of its children will be rendered. If
more than one is rendered, C{logout} will be invoked multiple
times and probably earlier than desired.
"""
def getChildWithDefault(self, name, request):
"""
Pass through the lookup to the wrapped resource, wrapping
the result in L{ResourceWrapper} to ensure C{logout} is
called when rendering of the child is complete.
"""
return ResourceWrapper(self.resource.getChildWithDefault(name, request))
def render(self, request):
"""
Hook into response generation so that when rendering has
finished completely (with or without error), C{logout} is
called.
"""
request.notifyFinish().addBoth(lambda ign: logout())
return super(ResourceWrapper, self).render(request)
return ResourceWrapper(avatar)
def _loginFailed(self, result):
"""
Handle login failure by presenting either another challenge (for
expected authentication/authorization-related failures) or a server
error page (for anything else).
"""
if result.check(error.Unauthorized, error.LoginFailed):
return UnauthorizedResource(self._credentialFactories)
else:
log.err(
result,
"HTTPAuthSessionWrapper.getChildWithDefault encountered "
"unexpected error")
return ErrorPage(500, None, None)
def _selectParseHeader(self, header):
"""
Choose an C{ICredentialFactory} from C{_credentialFactories}
suitable to use to decode the given I{Authenticate} header.
@return: A two-tuple of a factory and the remaining portion of the
header value to be decoded or a two-tuple of C{None} if no
factory can decode the header value.
"""
elements = header.split(' ')
scheme = elements[0].lower()
for fact in self._credentialFactories:
if fact.scheme == scheme:
return (fact, ' '.join(elements[1:]))
return (None, None)
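# Usage sketch (assumption, not part of this module): wiring the wrapper into
# a site with HTTP basic auth.  Portal comes from twisted.cred.portal and
# BasicCredentialFactory from twisted.web.guard; the realm must return
# IResource avatars.  MyRealm and checker are placeholders.
#
#   portal = Portal(MyRealm(), [checker])
#   root = HTTPAuthSessionWrapper(portal, [BasicCredentialFactory("realm")])
#   site = server.Site(root)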
| apache-2.0 | -5,794,425,844,413,194,000 | 35.654709 | 88 | 0.639467 | false |
konsulko/libsigrokdecode | decoders/ir_rc5/lists.py | 13 | 3251 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
# Systems/addresses (0..31). Items that are not listed are reserved/unknown.
system = {
0: ['TV receiver 1', 'TV1'],
1: ['TV receiver 2', 'TV2'],
2: ['Teletext', 'Txt'],
3: ['Extension to TV1 and TV2', 'Ext TV1/TV2'],
4: ['LaserVision player', 'LV'],
5: ['Video cassette recorder 1', 'VCR1'],
6: ['Video cassette recorder 2', 'VCR2'],
7: ['Experimental', 'Exp'],
8: ['Satellite TV receiver 1', 'Sat1'],
9: ['Extension to VCR1 and VCR2', 'Ext VCR1/VCR2'],
10: ['Satellite TV receiver 2', 'Sat2'],
12: ['Compact disc video player', 'CD-Video'],
13: ['Camcorder', 'Cam'],
14: ['Photo on compact disc player', 'CD-Photo'],
16: ['Audio preamplifier 1', 'Preamp1'],
17: ['Radio tuner', 'Tuner'],
    18: ['Analog cassette recorder 1', 'Rec1'],
19: ['Audio preamplifier 2', 'Preamp2'],
20: ['Compact disc player', 'CD'],
21: ['Audio stack or record player', 'Combi'],
22: ['Audio satellite', 'Sat'],
    23: ['Analog cassette recorder 2', 'Rec2'],
26: ['Compact disc recorder', 'CD-R'],
29: ['Lighting 1', 'Light1'],
30: ['Lighting 2', 'Light2'],
31: ['Telephone', 'Phone'],
}
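# Lookup sketch (hypothetical address value): system.get(5, ['Unknown', 'Unk'])
# yields ['Video cassette recorder 1', 'VCR1']; addresses not listed above fall
# back to the default, since they are reserved/unknown.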
digits = {
0: ['0', '0'],
1: ['1', '1'],
2: ['2', '2'],
3: ['3', '3'],
4: ['4', '4'],
5: ['5', '5'],
6: ['6', '6'],
7: ['7', '7'],
8: ['8', '8'],
9: ['9', '9'],
}
# Commands (0..63 for RC-5, and 0..127 for Extended RC-5).
# Items that are not listed are reserved/unknown.
command = {
'TV': dict(list(digits.items()) + list({
10: ['-/--', '-/--'],
11: ['Channel/program', 'Ch/P'],
12: ['Standby', 'StBy'],
13: ['Mute', 'M'],
14: ['Personal preferences', 'PP'],
        15: ['Display', 'Disp'],
16: ['Volume up', 'Vol+'],
17: ['Volume down', 'Vol-'],
18: ['Brightness up', 'Br+'],
19: ['Brightness down', 'Br-'],
20: ['Saturation up', 'S+'],
21: ['Saturation down', 'S-'],
32: ['Program up', 'P+'],
33: ['Program down', 'P-'],
}.items())),
'VCR': dict(list(digits.items()) + list({
10: ['-/--', '-/--'],
12: ['Standby', 'StBy'],
32: ['Program up', 'P+'],
33: ['Program down', 'P-'],
50: ['Fast rewind', 'FRW'],
52: ['Fast forward', 'FFW'],
53: ['Play', 'Pl'],
54: ['Stop', 'St'],
55: ['Recording', 'Rec'],
}.items())),
}
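# Because the digit entries are merged in above, command['TV'][5] is ['5', '5']
# while command['TV'][16] is ['Volume up', 'Vol+'] (illustrative lookups only).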
| gpl-3.0 | -1,176,506,645,864,708,900 | 33.585106 | 76 | 0.541372 | false |
leviroth/praw | praw/models/subreddits.py | 1 | 4340 | """Provide the Subreddits class."""
from . import Subreddit
from .base import PRAWBase
from .listing.generator import ListingGenerator
from .util import stream_generator
from ..const import API_PATH
class Subreddits(PRAWBase):
"""Subreddits is a Listing class that provides various subreddit lists."""
@staticmethod
def _to_list(subreddit_list):
return ",".join([str(x) for x in subreddit_list])
def default(self, **generator_kwargs):
"""Return a :class:`.ListingGenerator` for default subreddits."""
return ListingGenerator(
self._reddit, API_PATH["subreddits_default"], **generator_kwargs
)
def gold(self, **generator_kwargs):
"""Return a :class:`.ListingGenerator` for gold subreddits."""
return ListingGenerator(
self._reddit, API_PATH["subreddits_gold"], **generator_kwargs
)
def new(self, **generator_kwargs):
"""Return a :class:`.ListingGenerator` for new subreddits."""
return ListingGenerator(
self._reddit, API_PATH["subreddits_new"], **generator_kwargs
)
def popular(self, **generator_kwargs):
"""Return a :class:`.ListingGenerator` for popular subreddits."""
return ListingGenerator(
self._reddit, API_PATH["subreddits_popular"], **generator_kwargs
)
def recommended(self, subreddits, omit_subreddits=None):
"""Return subreddits recommended for the given list of subreddits.
:param subreddits: A list of Subreddit instances and/or subreddit
names.
:param omit_subreddits: A list of Subreddit instances and/or subreddit
names to exclude from the results (Reddit's end may not work as
expected).
"""
if not isinstance(subreddits, list):
raise TypeError("subreddits must be a list")
if omit_subreddits is not None and not isinstance(
omit_subreddits, list
):
raise TypeError("omit_subreddits must be a list or None")
params = {"omit": self._to_list(omit_subreddits or [])}
url = API_PATH["sub_recommended"].format(
subreddits=self._to_list(subreddits)
)
return [
Subreddit(self._reddit, sub["sr_name"])
for sub in self._reddit.get(url, params=params)
]
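    # Usage sketch (hypothetical subreddit names):
    #   reddit.subreddits.recommended(["redditdev", "learnpython"],
    #                                 omit_subreddits=["programming"])
    # returns a list of Subreddit instances suggested by the API.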
def search(self, query, **generator_kwargs):
"""Return a :class:`.ListingGenerator` of subreddits matching ``query``.
Subreddits are searched by both their title and description. To search
names only see ``search_by_name``.
:param query: The query string to filter subreddits by.
"""
self._safely_add_arguments(generator_kwargs, "params", q=query)
return ListingGenerator(
self._reddit, API_PATH["subreddits_search"], **generator_kwargs
)
def search_by_name(self, query, include_nsfw=True, exact=False):
"""Return list of Subreddits whose names begin with ``query``.
:param query: Search for subreddits beginning with this string.
:param include_nsfw: Include subreddits labeled NSFW (default: True).
:param exact: Return only exact matches to ``query`` (default: False).
"""
result = self._reddit.post(
API_PATH["subreddits_name_search"],
data={
"include_over_18": include_nsfw,
"exact": exact,
"query": query,
},
)
return [self._reddit.subreddit(x) for x in result["names"]]
def search_by_topic(self, query):
"""Return list of Subreddits whose topics match ``query``.
:param query: Search for subreddits relevant to the search topic.
"""
result = self._reddit.get(
API_PATH["subreddits_by_topic"], params={"query": query}
)
return [
self._reddit.subreddit(x["name"]) for x in result if x.get("name")
]
def stream(self, **stream_options):
"""Yield new subreddits as they are created.
Subreddits are yielded oldest first. Up to 100 historical subreddits
will initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
"""
return stream_generator(self.new, **stream_options)
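        # Usage sketch (assumes an authenticated ``reddit`` instance):
        #   for subreddit in reddit.subreddits.stream():
        #       print(subreddit.display_name)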
| bsd-2-clause | 7,079,015,187,029,998,000 | 35.166667 | 80 | 0.615207 | false |
natanielruiz/android-yolo | jni-build/jni/include/tensorflow/python/kernel_tests/padding_fifo_queue_test.py | 5 | 53221 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class PaddingFIFOQueueTest(tf.test.TestCase):
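  # A minimal sketch of the padding behaviour exercised by these tests (same
  # API as used below):
  #   q = tf.PaddingFIFOQueue(2, tf.int32, shapes=((None,),))
  #   q.enqueue(([1],)).run(); q.enqueue(([2, 3],)).run()
  #   q.dequeue_many(2)  # yields [[1, 0], [2, 3]]; shorter elements are
  #                      # zero-padded to the longest element in the batch.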
def testConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
((), ()),
shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(3, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None, None),))
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
tf.PaddingFIFOQueue(10, tf.float32, None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), ((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
shapes=((None,), (1, None)))
str_elems = [
["a"],
["ab"],
["abc"],
["abc", "d"],
["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [
[[1]],
[[2]],
[[3]],
[[1, 2]],
[[1, 2, 3]],
[[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(
[[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
[b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
self.assertAllEqual(
[[[1, 0, 0]],
[[2, 0, 0]],
[[3, 0, 0]],
[[1, 2, 0]],
[[1, 2, 3]]],
int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
shapes=((None,), (1, None)))
str_elems = [
["a"],
["ab"],
["abc"],
["abc", "d"],
["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [
[[1]],
[[2]],
[[3]],
[[1, 2]],
[[1, 2, 3]],
[[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(
[[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
[b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
self.assertAllEqual(
[[[1, 0, 0]],
[[2, 0, 0]],
[[3, 0, 0]],
[[1, 2, 0]],
[[1, 2, 3]]],
int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(tf.int32, enq.inputs[1].dtype)
self.assertEqual(tf.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
with self.assertRaises(ValueError):
q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError, r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(50, tf.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_placeholder = tf.placeholder(tf.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = tf.placeholder(
tf.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(elements_enqueued,
elements_enqueued + count,
dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = tf.placeholder(tf.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(elements_dequeued,
elements_dequeued + count,
dtype=np.int32)
self.assertAllEqual(
expected_range, dequeuemany_t.eval({count_placeholder: count}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.PaddingFIFOQueue(100, tf.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.PaddingFIFOQueue(total_count, tf.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
self.assertAllEqual(elems[3:], sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, (tf.float32, tf.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(1, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_a")
q_a_2 = tf.PaddingFIFOQueue(15, tf.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_b")
q_b_2 = tf.PaddingFIFOQueue(10, tf.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
q_c_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_c")
q_c_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
q_d_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_f")
q_f_2 = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), ((), ()), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.PaddingFIFOQueue(10, tf.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(10, tf.float32, ((),))
q2 = tf.PaddingFIFOQueue(15, tf.float32, ((),))
enq_q = tf.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.PaddingFIFOQueue(5, tf.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.PaddingFIFOQueue(5, tf.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(5, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(2, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64, tf.complex128]
shape = (32, 4, 128)
q = tf.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype in (tf.complex64, tf.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
tf.PaddingFIFOQueue(32, [tf.float32], [tf.TensorShape(None)])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -6,575,144,917,407,246,000 | 34.790854 | 80 | 0.595874 | false |
adamchainz/django-mysql | src/django_mysql/models/fields/sizes.py | 2 | 2719 | from django.core import checks
from django.db.models import BinaryField, TextField
class SizedBinaryField(BinaryField):
def __init__(self, *args, **kwargs):
self.size_class = kwargs.pop("size_class", 4)
super().__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
if self.size_class not in (1, 2, 3, 4):
errors.append(
checks.Error(
"size_class must be 1, 2, 3, or 4",
hint=None,
obj=self,
id="django_mysql.E007",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
bad_paths = (
"django_mysql.models.fields.sizes.SizedBinaryField",
"django_mysql.models.fields.SizedBinaryField",
)
if path in bad_paths:
path = "django_mysql.models.SizedBinaryField"
kwargs["size_class"] = self.size_class
return name, path, args, kwargs
def db_type(self, connection):
if self.size_class == 1:
return "tinyblob"
elif self.size_class == 2:
return "blob"
elif self.size_class == 3:
return "mediumblob"
else: # don't check size_class == 4 as a safeguard for invalid values
return "longblob"
class SizedTextField(TextField):
def __init__(self, *args, **kwargs):
self.size_class = kwargs.pop("size_class", 4)
super().__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
if self.size_class not in (1, 2, 3, 4):
errors.append(
checks.Error(
"size_class must be 1, 2, 3, or 4",
hint=None,
obj=self,
id="django_mysql.E008",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
bad_paths = (
"django_mysql.models.fields.sizes.SizedTextField",
"django_mysql.models.fields.SizedTextField",
)
if path in bad_paths:
path = "django_mysql.models.SizedTextField"
kwargs["size_class"] = self.size_class
return name, path, args, kwargs
def db_type(self, connection):
if self.size_class == 1:
return "tinytext"
elif self.size_class == 2:
return "text"
elif self.size_class == 3:
return "mediumtext"
else: # don't check size_class == 4 as a safeguard for invalid values
return "longtext"
| bsd-3-clause | -2,181,917,365,051,417,300 | 30.616279 | 78 | 0.525193 | false |
SepehrMN/nest-simulator | pynest/examples/spatial/conncon_targets.py | 20 | 2449 | # -*- coding: utf-8 -*-
#
# conncon_targets.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Connect two populations with convergent projection and rectangular mask, visualize connections from source perspective
-----------------------------------------------------------------------------------------------------------------------
Create two populations of iaf_psc_alpha neurons on a 30x30 grid
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.], edge_wrap=True)
########################################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'use_on_source': True,
'mask': {'rectangular': {'lower_left': [-0.2, -0.5],
'upper_right': [0.2, 0.5]}}}
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
#####################################################################
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('conncon_targets.pdf')
| gpl-2.0 | -6,948,630,001,401,315,000 | 31.223684 | 119 | 0.60392 | false |
AkaZuko/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/imageDashboard.py | 2 | 10598 | ''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, render
from django.template import RequestContext
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
from gnowsys_ndf.ndf.models import File
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import META_TYPE, GAPPS, MEDIA_ROOT
from gnowsys_ndf.ndf.models import node_collection
from gnowsys_ndf.ndf.views.methods import get_node_common_fields,create_grelation_list,get_execution_time
from gnowsys_ndf.ndf.views.methods import get_node_metadata
from gnowsys_ndf.ndf.management.commands.data_entry import create_gattribute
gapp_mt = node_collection.one({'_type': "MetaType", 'name': META_TYPE[0]})
GST_IMAGE = node_collection.one({'member_of': gapp_mt._id, 'name': GAPPS[3]})
@get_execution_time
def imageDashboard(request, group_id, image_id=None):
'''
    fetching image according to group name
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
if image_id is None:
image_ins = node_collection.find_one({'_type': "GSystemType", "name": "Image"})
if image_ins:
image_id = str(image_ins._id)
img_col = node_collection.find({'_type': 'File', 'member_of': {'$all': [ObjectId(image_id)]}, 'group_set': {'$all': [ObjectId(group_id)]}})
template = "ndf/ImageDashboard.html"
already_uploaded=request.GET.getlist('var',"")
variable = RequestContext(request, {'imageCollection': img_col,'already_uploaded':already_uploaded,'groupid':group_id,'group_id':group_id })
return render_to_response(template, variable)
@get_execution_time
def getImageThumbnail(request, group_id, _id):
'''
    this function can be called to get the thumbnail of an image through a url
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
img_obj = node_collection.one({"_type": u"File", "_id": ObjectId(_id)})
if img_obj is not None:
# getting latest uploaded pic's _id
img_fs = img_obj.fs_file_ids[2]
if (img_obj.fs.files.exists(img_fs)):
f = img_obj.fs.files.get(ObjectId(img_fs))
return HttpResponse(f.read(),content_type=f.content_type)
else:
return HttpResponse("")
@get_execution_time
def getFullImage(request, group_id, _id, file_name = ""):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
img_obj = node_collection.one({"_id": ObjectId(_id)})
if img_obj is not None:
if (img_obj.fs.files.exists(img_obj.fs_file_ids[0])):
f = img_obj.fs.files.get(ObjectId(img_obj.fs_file_ids[0]))
return HttpResponse(f.read(), content_type=f.content_type)
else:
return HttpResponse("")
else:
return HttpResponse("")
@get_execution_time
def get_mid_size_img(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
img_obj = node_collection.one({"_id": ObjectId(_id)})
try:
f = img_obj.fs.files.get(ObjectId(img_obj.fs_file_ids[2]))
return HttpResponse(f.read(), content_type=f.content_type)
except IndexError:
f = img_obj.fs.files.get(ObjectId(img_obj.fs_file_ids[0]))
return HttpResponse(f.read(), content_type=f.content_type)
@get_execution_time
def image_search(request,group_id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
imgcol = node_collection.find({"_type": "File", 'mime_type': {'$regex': 'image'}})
if request.method=="GET":
keyword=request.GET.get("search","")
img_search=node_collection.find({'$and':[{'mime_type':{'$regex': 'image'}},{'$or':[{'name':{'$regex':keyword}},{'tags':{'$regex':keyword}}]}]})
template="ndf/file_search.html"
variable=RequestContext(request,{'file_collection':img_search,'view_name':'image_search','groupid':group_id,'group_id':group_id})
return render_to_response(template,variable)
@get_execution_time
def image_detail(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
img_node = node_collection.one({"_id": ObjectId(_id)})
# First get the navigation list till topic from theme map
nav_l=request.GET.get('nav_li','')
breadcrumbs_list = []
nav_li = ""
if nav_l:
nav_li = nav_l
if img_node._type == "GSystemType":
return imageDashboard(request, group_id, _id)
img_node.get_neighbourhood(img_node.member_of)
imageCollection = node_collection.find({'member_of': {'$all': [ObjectId(GST_IMAGE._id)]},
'_type': 'File','fs_file_ids': {'$ne': []},
'group_set': {'$all': [ObjectId(group_id)]},
'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [
{'access_policy': u"PRIVATE"},
{'created_by': request.user.id}
]
}
]
}).sort("last_update", -1)
return render_to_response("ndf/image_detail.html",
{ 'node': img_node,
'group_id': group_id, 'nav_list':nav_li,
'groupid':group_id, 'imageCollection': imageCollection
},
context_instance = RequestContext(request)
)
@get_execution_time
def image_edit(request,group_id,_id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
img_node = node_collection.one({"_id": ObjectId(_id)})
title = GST_IMAGE.name
if request.method == "POST":
# get_node_common_fields(request, img_node, group_id, GST_IMAGE)
img_node.save(is_changed=get_node_common_fields(request, img_node, group_id, GST_IMAGE))
get_node_metadata(request,img_node)
teaches_list = request.POST.get('teaches_list','') # get the teaches list
if teaches_list !='':
teaches_list=teaches_list.split(",")
create_grelation_list(img_node._id,"teaches",teaches_list)
assesses_list = request.POST.get('assesses_list','')
if assesses_list !='':
assesses_list=assesses_list.split(",")
create_grelation_list(img_node._id,"assesses",assesses_list)
return HttpResponseRedirect(reverse('image_detail', kwargs={'group_id': group_id, '_id': img_node._id}))
else:
img_node.get_neighbourhood(img_node.member_of)
return render_to_response("ndf/image_edit.html",
{ 'node': img_node,'title': title,
'group_id': group_id,
'groupid':group_id
},
context_instance=RequestContext(request)
)
| agpl-3.0 | -8,909,396,364,202,302,000 | 42.793388 | 151 | 0.560294 | false |
geobricks/geobricks_storage_manager | geobricks_storage_manager/core/storage_manager.py | 1 | 1359 | import subprocess
from geobricks_common.core.log import logger
log = logger(__file__)
class StorageManager():
def __init__(self, config):
self.config = config["settings"]
def publish_raster_to_storage(self, folder_path):
self._publish_to_storage(folder_path, "raster")
def publish_vector_to_storage(self, folder_path):
self._publish_to_storage(folder_path, "vector")
def _publish_to_storage(self, input_path, layer_type):
folder_server = self.config["folders"]["storage"] + layer_type
server = self.config["server"]
subprocess.call(['ls', '-1'], shell=True)
args = [
'sshpass',
'-p', server["password"],
'rsync',
'-azP',
input_path,
server["user"] + '@' + server["url"] + ":" + folder_server
]
proc = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_value, stderr_value = proc.communicate()
log.info(repr(stdout_value))
log.info(repr(stderr_value))
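    # Illustration only (hypothetical config values): with user "geo", url
    # "storage.example.org", password "secret" and a storage folder "/data/storage/",
    # publishing a raster spawns roughly
    #   sshpass -p secret rsync -azP <input_path> [email protected]:/data/storage/raster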
# storage_manager = StorageManager()
# storage_manager.publish_raster_to_ftp("/home/vortex/Desktop/LAYERS/processing/test/", "fenix")
| gpl-2.0 | 2,983,300,947,460,227,000 | 31.357143 | 96 | 0.551876 | false |
jesusVMayor/account-payment | __unported__/nan_account_bank_statement/__init__.py | 15 | 1387 | # -*- encoding: latin-1 -*-
##############################################################################
#
# Copyright (c) 2009 Àngel Àlvarez - NaN (http://www.nan-tic.com) All Rights Reserved.
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import account_statement
| agpl-3.0 | -8,542,480,013,495,619,000 | 45.233333 | 88 | 0.678443 | false |
umutseven92/GameOfLife | src/cgl.py | 1 | 1972 | import pygame
import time
import sys
from grid import grid
# Colors used
black = (0, 0, 0)
white = (255, 255, 255)
gray = (192, 192, 192)
MARGIN = 5 # Margin between cells
WIDTH = 20 # Cell width
HEIGHT = 20 # Cell height
ROW = 40 # Random row count
COL = 40 # Random column count
speed_delta = 10
max_speed = 125
min_speed = 5
speed = min_speed
done = False
pause = False
pygame.init()
pygame.display.set_caption("Game of Life")
clock = pygame.time.Clock()
main = grid()
if len(sys.argv) > 1:
main.load_map(sys.argv[1])
else:
main.load_random(ROW, COL)
window_size = [WIDTH * len(main.grid[0]) + MARGIN * len(main.grid[0]) + MARGIN, HEIGHT * len(main.grid)+ MARGIN * len(main.grid) + MARGIN]
screen = pygame.display.set_mode(window_size)
screen.fill(gray)
def draw_grid():
for row in range(len(main.grid)):
for col in range(len(main.grid[row])):
if main.grid[row][col] == 0:
color = white
elif main.grid[row][col] == 1:
color = black
pygame.draw.rect(screen, color, [(MARGIN + WIDTH) * col + MARGIN, (MARGIN + HEIGHT) * row + MARGIN, WIDTH, HEIGHT])
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
# Quit
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
# Pause
pause = not pause
elif event.key == pygame.K_r:
# Reset
if main.map_type == "random":
main.load_random(ROW, COL)
else:
main.load_map(sys.argv[1])
speed = min_speed
if pause:
pause = not pause
elif event.key == pygame.K_ESCAPE:
# Quit
done = True
elif event.key == pygame.K_RIGHT:
# Speed up
speed += speed_delta
if speed > max_speed:
speed = max_speed
elif event.key == pygame.K_LEFT:
# Speed down
speed -= speed_delta
if speed < min_speed:
speed = min_speed
if not pause:
screen.fill(gray)
draw_grid()
main.apply_rules()
clock.tick(speed)
pygame.display.flip()
pygame.quit()
| mit | 4,594,087,182,103,531,000 | 19.102041 | 138 | 0.63198 | false |
pasv/Empire | lib/stagers/hop_php.py | 22 | 2123 | from lib.common import helpers
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Launcher',
'Author': ['@harmj0y'],
'Description': ('Generates a hop.php redirector for an Empire listener.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description' : 'File to output php redirector to.',
'Required' : True,
'Value' : '/tmp/hop.php'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
listenerID = self.options['Listener']['Value']
# extract out the listener config information
listener = self.mainMenu.listeners.get_listener(listenerID)
if listener:
# extract out the listener config information
name = listener[1]
host = listener[2]
port = listener[3]
certPath = listener[4]
profile = listener[8]
listenerType = listener[-2]
redirectTarget = listener[-1]
resources = profile.split("|")[0]
code = self.mainMenu.stagers.generate_hop_php(host, resources)
return code
else:
print helpers.color("[!] Error in hop.php generation.")
return ""
| bsd-3-clause | 7,728,688,183,123,161,000 | 28.901408 | 86 | 0.503062 | false |
FullStackEmbedded/Weather_Shield | 02_Code/SensorDrivers/sht21_class.py | 2 | 6157 | #!/usr/bin/python
##==============================================================================##
## FULL STACK EMBEDDED 2016 ##
##==============================================================================##
## File : sht21.py ##
## Author: FA ##
## Board : Raspberry Pi ##
## Brief : Sensor layer. Functions for sensor access ##
## Note : ##
#===============================================================================##
## IMPORTS
from __future__ import print_function
from smbus import SMBus
import time
##GLOBAL DEFINITION
DEBUG = 0
class SensorError(Exception):
"""Problem occured while communicating with sensor."""
class i2cError(SensorError):
"""Raised when the i2c error occurs"""
class SHT21:
"""Class to read temperature and humidity from SHT21"""
## Control constants
_SOFTRESET = 0xFE
_SLAVE_ADDRESS = 0x40
_TRIGGER_TEMPERATURE_NO_HOLD = 0xF3
_TRIGGER_HUMIDITY_NO_HOLD = 0xF5
_STATUS_BITS_MASK = 0xFFFC
# Wait a bit more than recommended
_TEMPERATURE_WAIT_TIME = 0.086 # (datasheet: typ=66, max=85)
_HUMIDITY_WAIT_TIME = 0.030 # (datasheet: typ=22, max=29)
def __init__(self, device_number = 1):
"""Opens the i2c device (assuming that the kernel modules have been
loaded) & run soft reset. (user register leaved to default value)"""
self.bus = SMBus(device_number)
self.bus.write_byte(self._SLAVE_ADDRESS, self._SOFTRESET)
time.sleep(0.015)
if DEBUG:
print("SHT21 init done.")
def getTemperature(self):
"""Reads the temperature from the sensor. Not that this call blocks
for ~86ms to allow the sensor to return the data """
self.bus.write_byte(self._SLAVE_ADDRESS, self._TRIGGER_TEMPERATURE_NO_HOLD)
data = []
time.sleep(self._TEMPERATURE_WAIT_TIME)
data.append(self.bus.read_byte(self._SLAVE_ADDRESS))
data.append(self.bus.read_byte(self._SLAVE_ADDRESS))
Temperature = self._get_temperature_from_buffer(data)
if DEBUG:
print("Temp[C] = ", Temperature)
return Temperature
def getHumidity(self):
"""Reads the humidity from the sensor. Not that this call blocks
for ~30ms to allow the sensor to return the data"""
self.bus.write_byte(self._SLAVE_ADDRESS, self._TRIGGER_HUMIDITY_NO_HOLD)
data = []
time.sleep(self._HUMIDITY_WAIT_TIME)
data.append(self.bus.read_byte(self._SLAVE_ADDRESS))
data.append(self.bus.read_byte(self._SLAVE_ADDRESS))
Humidity = self._get_humidity_from_buffer(data)
if DEBUG:
print("Humidity[%] = ", Humidity)
return Humidity
@staticmethod
def _get_temperature_from_buffer(data):
"""This function reads the first two bytes of data and
returns the temperature in C by using the following function:
        T = -46.85 + (175.72 * (ST/2^16)) where ST is the value from the sensor """
unadjusted = ((data[0]) << 8) + (data[1])
unadjusted &= SHT21._STATUS_BITS_MASK # zero the status bits
unadjusted *= 175.72
unadjusted /= 1 << 16 # divide by 2^16
unadjusted -= 46.85
return unadjusted
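        # Rough sanity check with assumed raw bytes (not taken from the datasheet):
        # data = [0x63, 0x4C] gives 0x634C = 25420; the status bits are already zero,
        # and 25420 * 175.72 / 65536 - 46.85 is roughly 21.3 degrees C.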
@staticmethod
def _get_humidity_from_buffer(data):
"""This function reads the first two bytes of data and returns
the relative humidity in percent by using the following function:
        RH = -6 + (125 * (SRH / 2^16)) where SRH is the value read from the sensor """
unadjusted = (data[0] << 8) + data[1]
unadjusted &= SHT21._STATUS_BITS_MASK # zero the status bits
unadjusted *= 125.0
unadjusted /= 1 << 16 # divide by 2^16
unadjusted -= 6
return unadjusted
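        # Rough sanity check with assumed raw bytes: data = [0x63, 0x52] gives
        # 0x6352 = 25426; masking the status bits leaves 25424, and
        # 25424 * 125.0 / 65536 - 6 is roughly 42.5 %RH.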
@staticmethod
def _calculate_checksum(data, number_of_bytes):
"""5.7 CRC Checksum using the polynomial given in the datasheet"""
# CRC
        POLYNOMIAL = 0x131  # P(x)=x^8+x^5+x^4+1 = 100110001
crc = 0
# calculates 8-Bit checksum with given polynomial
for byteCtr in range(number_of_bytes):
crc ^= (data[byteCtr])
for bit in range(8, 0, -1):
if crc & 0x80:
crc = (crc << 1) ^ POLYNOMIAL
else:
crc = (crc << 1)
return crc
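        # Usage sketch (illustrative; the read methods above fetch only the two data
        # bytes, not the CRC byte): for a raw transfer [msb, lsb, crc], the reading is
        # valid when SHT21._calculate_checksum([msb, lsb], 2) == crc.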
class SensorInterface(object):
"""Abstract common interface for hardware sensors."""
def __init__(self):
self.error_count = 0
def get_value(self):
try:
return self._get_value()
except SensorError as e:
# TODO: Let errors expire after given time
if self.error_count < 3:
pass
else:
raise e
def _get_value():
raise NotImplementedError
class SHT21_Sensor(SensorInterface):
"""Sensor using SHT21 hardware."""
def __init__(self):
super(SHT21_Sensor, self).__init__()
self._hw_sensor = SHT21()
class TemperatureSensor(SHT21_Sensor):
"""Implements common interface for temperatur sensor"""
def _get_value(self):
"""Read sensor value."""
return self._hw_sensor.getTemperature()
class HumiditySensor(SHT21_Sensor):
"""Implements common interface for humidity sensor"""
def _get_value(self):
"""Read sensor value."""
return self._hw_sensor.getHumidity()
if __name__ == "__main__":
tmpSens = TemperatureSensor()
humiditySens = HumiditySensor()
while 1:
print("Temperature[C] = ",tmpSens.get_value()," ","AirHumidity[%] =",
humiditySens.get_value())
print("_________________________________________________________________")
time.sleep(1)
| gpl-2.0 | -4,909,613,834,279,437,000 | 34.589595 | 91 | 0.529479 | false |
pablolizardo/dotfiles | blender/scripts/dynamictextpanel.py | 1 | 17634 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
'''
Add-on : Dynamic Text
By : Double Z.
Description :
This add-on allows the content of a 3D Text object to be changed over time.
Later on, features such as baking (to allow linking into other projects), transitions and *.srt file import were added.
Acknowledgements :
Dono for the idea of the transitions
Boby for the idea of baking the text.
e5dy/edddy for warning me about the potential bugs due to the use of bpy.ops inside a script, and for explaining the beginnings of a very useful workaround.
The Blenderclan forum for being a good French-speaking forum about Blender.
'''
bl_info = {
'name': "Dynamic Text",
'author': "DoubleZ",
'version': (0, 7, 0),
'blender': (2, 7, 0),
'api': 60991,
'location': "",
'warning': "Now beta ?",
'description': "",
'wiki_url': "",
'category': "Object"}
import bpy
from bpy_extras.io_utils import ImportHelper
#Function called to update the 3D text display
#according to the user's settings
def updateTexts(self):
current = bpy.context.scene.frame_current
for obj in bpy.data.objects :
if obj.type == "FONT" and obj.dynamicSettings.update_blocs :
inTimeline = False
for ind,bloc in enumerate(sorted(obj.dynamicBlocs, key=lambda db:db.frameStart)) :
nextPos = sorted(obj.dynamicBlocs, key=lambda db:db.frameStart)[ind+1].frameStart if ind < len(obj.dynamicBlocs) - 1 else bpy.context.scene.frame_end + 1000
if current >= bloc.frameStart and current < nextPos:
inTimeline = True
obj.dynamicSettings.lastBlocUsed = ind
finale = bloc.body
for ind,custom in enumerate(obj.dynamicCustom) :
if custom.typeBloc == "FLOAT" :
nbrStr = str(round(custom.number, custom.afterPoint)) if custom.afterPoint else str(int(custom.number))
finale = finale.replace("%" + str(ind), nbrStr)
if custom.typeBloc == "TIMER" :
showTime = custom.number
if showTime < 0 :
showTime *= -1
final = "-"
else :
final = ""
                            day = int(showTime/86400) # 86400 seconds in a day
hour = int(showTime/3600) % 24
min = int(showTime/60) % 60
sec = int(showTime) % 60
if custom.typeTimer == "SS" :
final += str(sec).zfill(2)
if custom.typeTimer == "MM_SS" :
final += str(min).zfill(2) + ":" + str(sec).zfill(2)
if custom.typeTimer == "HH_MM_SS" :
final += str(hour).zfill(2) + ":" + str(min).zfill(2) + ":" + str(sec).zfill(2)
if custom.typeTimer == "DDD_HH_MM_SS" :
final += str(day).zfill(3) + ":" + str(hour).zfill(2) + ":" + str(min).zfill(2) + ":" + str(sec).zfill(2)
virg = abs(custom.number - int(custom.number))
if custom.typeMs == "_M" :
final += "." + str(int(virg*10))
if custom.typeMs == "_MM" :
final += "." + str(int(virg*100)).zfill(2)
if custom.typeMs == "_MMM" :
final += "." + str(int(virg*1000)).zfill(3)
if custom.typeMs == "_MMMM" :
final += "." + str(int(virg*10000)).zfill(4)
finale = finale.replace("%" + str(ind), final)
obj.data.body = finale
break
if not inTimeline and len(obj.dynamicBlocs):
obj.data.body = ""
if len(obj.material_slots) :
if obj.material_slots[0].material :
if obj.material_slots[0].material.use_transparency :
alphaIn = ( current - obj.dynamicBlocs[obj.dynamicSettings.lastBlocUsed].frameStart + 1) / float(obj.dynamicSettings.fadeStart)
alphaOut = ( nextPos - current) / float(obj.dynamicSettings.fadeEnd)
obj.material_slots[0].material.alpha = alphaIn if alphaIn < alphaOut else alphaOut
bpy.context.scene.update()
def callUt(self, context) : #used for one-off updates
updateTexts(self)
#Function converting a timecode from an srt file into the corresponding frame
def getFrame(time,state):
temps = time.split(" ")
fps = bpy.context.scene.render.fps
if state == "debut" :
pos = 0
elif state == "fin" :
pos = 2
else :
pos = 0
hour = int(temps[pos].split(":")[0])
minute = int(temps[pos].split(":")[1])
seconde = int(temps[pos].split(":")[2].split(",")[0])
frame = int(temps[pos].split(":")[2].split(",")[1])
return hour*3600*fps + minute*60*fps + seconde*fps + int(frame/(1000/fps))
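# Worked example (assumed values): at 25 fps, an srt line "00:01:02,500 --> ..."
# read with state "debut" gives 0*3600*25 + 1*60*25 + 2*25 + int(500/(1000/25))
# = 0 + 1500 + 50 + 12 = frame 1562.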
#Settings given to the 3D text for a given period
class DynamicTextBlocs(bpy.types.PropertyGroup) :
    #All
bloc_items = [
("TEXT", "Text", "", 1),
("NUMBER", "Number", "", 2),
("TIMER", "Timer", "", 3)]
frameStart = bpy.props.IntProperty(default=1, min=1, update=callUt)
frameEnd = bpy.props.IntProperty(default=1, min=1, update=callUt)
body = bpy.props.StringProperty(default="", update=callUt)
#Settings applied to all blocs
class DynamicTextSettings(bpy.types.PropertyGroup) :
fadeStart = bpy.props.IntProperty(default=1, min=1)
fadeEnd = bpy.props.IntProperty(default=1, min=1)
lastBlocUsed = bpy.props.IntProperty(default=-1)
update_blocs = bpy.props.BoolProperty(default=False, update=callUt)
class DynamicTextCustomisableValue(bpy.types.PropertyGroup):
bloc_items = [
("FLOAT", "Float", "", 1),
("TIMER", "Timer", "", 2)]
typeBloc = bpy.props.EnumProperty(items=bloc_items, update=callUt)
#Type Float
number = bpy.props.FloatProperty(default=0, update=callUt)
afterPoint = bpy.props.IntProperty(default=1, min=0, max=10, update=callUt)
#Type Timer (share the "number" value)
timer_items = [
("SS", "ss", "", 1),
("MM_SS", "mm:ss", "", 2),
("HH_MM_SS", "hh:mm:ss", "", 3),
("DDD_HH_MM_SS", "ddd:hh:mm:ss", "", 4)]
ms_items = [
("NONE", "None", "", 1),
("_M", ".m", "", 2),
("_MM", ".mm", "", 3),
("_MMM", ".mmm", "", 4),
("_MMMM", ".mmmm", "", 5)]
typeTimer = bpy.props.EnumProperty(items=timer_items, default="HH_MM_SS", update=callUt)
typeMs = bpy.props.EnumProperty(items=ms_items, update=callUt)
bpy.utils.register_class(DynamicTextBlocs)
bpy.utils.register_class(DynamicTextSettings)
bpy.utils.register_class(DynamicTextCustomisableValue)
bpy.types.Object.dynamicBlocs = bpy.props.CollectionProperty(type=DynamicTextBlocs)
bpy.types.Object.dynamicSettings = bpy.props.PointerProperty(type=DynamicTextSettings)
bpy.types.Object.dynamicCustom = bpy.props.CollectionProperty(type=DynamicTextCustomisableValue)
#Panel holding the editable properties
class DynamicTextPanel(bpy.types.Panel):
bl_label = "Dynamic Text Panel"
bl_idname = "TEXT_PT_DYNAMIC_TEXT"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
return True if context.object.type == "FONT" else False
def draw_header(self, context):
layout = self.layout
ds = context.object.dynamicSettings
layout.prop(ds,"update_blocs", text="")
def draw(self, context):
layout = self.layout
col = layout.column(align=False)
obj = context.object
col.operator("dynamictext.bake_blocs","Bake bloc(s)")
tog = col.column(align=True)
tog.operator("dynamictext.add_bloc","Add to timeline", icon="ZOOMIN")
tog.operator("dynamictext.add_custom","Add Custom", icon="ZOOMIN")
tog.scale_y = 1.3
col.label("Fade(s) length:")
row = layout.row(align=True)
row.prop(obj.dynamicSettings,"fadeStart", text="In")
row.prop(obj.dynamicSettings,"fadeEnd", text="Out")
if obj.dynamicSettings.fadeStart > 1 or obj.dynamicSettings.fadeEnd > 1 :
if len(obj.material_slots) :
if not obj.material_slots[0].material :
layout.label("Material needed",icon="ERROR")
else :
if not obj.material_slots[0].material.use_transparency :
layout.label("Material transparency needed",icon="ERROR")
else :
layout.label("Material needed",icon="ERROR")
box = layout.box()
for ind,bloc in enumerate(obj.dynamicCustom) :
col = box.column()
row = col.row()
row.prop(bloc,"typeBloc", text="%" + str(ind))
row.prop(bloc,"number", text="")
row.operator("dynamictext.remove_custom", text="", icon="X", emboss=False).index = ind
if bloc.typeBloc == "FLOAT" :
col.prop(bloc, "afterPoint", text="After Points")
if bloc.typeBloc == "TIMER" :
col.prop(bloc,"typeTimer", text="Display")
col.prop(bloc,"typeMs", text="Precision")
for ind,bloc in enumerate(sorted(obj.dynamicBlocs, key=lambda db:db.frameStart)) :
box = layout.box()
col = box.column()
row = col.row()
row.prop(bloc,"frameStart",text="Start")
row.operator("dynamictext.remove_bloc", text="", icon="X", emboss=False).index = ind
row = col.row(align=True)
sub = row.row()
sub.scale_x = 3
sub.prop(bloc, "body", text="")
row.operator("dynamictext.set_text", text="set").index = ind
#Adds a new customisable variable
class AddCustom(bpy.types.Operator) :
bl_idname = "dynamictext.add_custom"
bl_label = "Dynamic text : Add customisable variable"
@classmethod
def poll(cls, context):
return True if bpy.context.object.type == "FONT" else False
def execute(self, context):
bpy.context.object.dynamicCustom.add()
return {"FINISHED"}
#Removes a customisable variable
class RemoveCustom(bpy.types.Operator) :
bl_idname = "dynamictext.remove_custom"
bl_label = "Dynamic text : Remove customisable variable"
index = bpy.props.IntProperty()
@classmethod
def poll(cls, context):
return True if bpy.context.object.type == "FONT" else False
def execute(self, context):
bpy.context.object.dynamicCustom.remove(self.index)
return {"FINISHED"}
#Adds a new bloc with default settings based on the current frame and the text displayed on the 3D text
class AddBloc(bpy.types.Operator) :
bl_idname = "dynamictext.add_bloc"
bl_label = "Dynamic text : Add new text bloc"
@classmethod
def poll(cls, context):
return True if bpy.context.object.type == "FONT" else False
def execute(self, context):
bloc = bpy.context.object.dynamicBlocs.add()
bloc.body = bpy.context.object.data.body
bloc.frameStart = bpy.context.scene.frame_current
bloc.frameEnd = bpy.context.scene.frame_current + 100
return {'FINISHED'}
#Removes the bloc
class RemoveBloc(bpy.types.Operator) :
bl_idname = "dynamictext.remove_bloc"
bl_label = "Dynamic text : Remove bloc"
index = bpy.props.IntProperty()
@classmethod
def poll(cls, context):
return True if bpy.context.object.type == "FONT" else False
def execute(self, context):
context.object.dynamicBlocs.remove(self.index)
return {'FINISHED'}
#Replaces the text of the current bloc with what is displayed in the 3D view
class SetText(bpy.types.Operator) :
bl_idname = "dynamictext.set_text"
bl_label = "Dynamic text : Set Text"
index = bpy.props.IntProperty()
@classmethod
def poll(cls, context):
return True if bpy.context.object.type == "FONT" else False
def execute(self, context):
context.object.dynamicBlocs[self.index].body = context.object.data.body
return {'FINISHED'}
#Converts the dynamic text into several non-dynamic texts
#Useful for linking the elements into other projects and customising the text in more detail
class BakeBlocs(bpy.types.Operator):
bl_idname = "dynamictext.bake_blocs"
bl_label = "Dynamic text : Bake blocs"
@classmethod
def poll(cls, context) :
return True if context.object.type == "FONT" else False
def execute(self, context):
original = context.object
originalSettings = context.object.dynamicSettings
originalBlocs = context.object.dynamicBlocs
for ind,bloc in enumerate(originalBlocs) :
if bloc.body != "" :
nextPos = sorted(originalBlocs, key=lambda db:db.frameStart)[ind+1].frameStart if ind < len(originalBlocs) - 1 else bpy.context.scene.frame_end + 1000
textData = bpy.context.object.data.copy()
objText = bpy.data.objects.new(original.name,textData)
bpy.context.scene.objects.link(objText)
objText.data.body = bloc.body
objText.hide = True
objText.keyframe_insert(data_path="hide", index=-1, frame=0)
objText.keyframe_insert(data_path="hide", index=-1, frame=nextPos)
objText.hide = False
objText.keyframe_insert(data_path="hide", index=-1, frame=bloc.frameStart)
objText.hide_render = True
objText.keyframe_insert(data_path="hide_render", index=-1, frame=0)
objText.keyframe_insert(data_path="hide_render", index=-1, frame=nextPos)
objText.hide_render = False
objText.keyframe_insert(data_path="hide_render", index=-1, frame=bloc.frameStart)
if len(objText.material_slots) :
if objText.material_slots[0].material :
mat = objText.material_slots[0].material
mat.alpha = 0.0
mat.keyframe_insert(data_path="alpha", index=-1, frame = bloc.frameStart-1)
mat.keyframe_insert(data_path="alpha", index=-1, frame = nextPos)
mat.alpha = 1.0
mat.keyframe_insert(data_path="alpha", index=-1, frame = bloc.frameStart-1+originalSettings.fadeStart)
mat.keyframe_insert(data_path="alpha", index=-1, frame = nextPos-originalSettings.fadeEnd)
return{'FINISHED'}
#Utility functions for Blender registration
def register():
bpy.utils.register_class(DynamicTextPanel)
bpy.utils.register_class(AddBloc)
bpy.utils.register_class(AddCustom)
bpy.utils.register_class(RemoveBloc)
bpy.utils.register_class(RemoveCustom)
bpy.utils.register_class(SetText)
bpy.utils.register_class(BakeBlocs)
bpy.app.handlers.frame_change_post.append(updateTexts)
bpy.app.handlers.render_pre.append(updateTexts)
def unregister():
bpy.utils.unregister_class(DynamicTextPanel)
bpy.utils.unregister_class(AddBloc)
bpy.utils.unregister_class(AddCustom)
bpy.utils.unregister_class(RemoveBloc)
bpy.utils.unregister_class(RemoveCustom)
bpy.utils.unregister_class(SetText)
bpy.utils.unregister_class(BakeBlocs)
bpy.app.handlers.frame_change_post.remove(updateTexts)
bpy.app.handlers.render_pre.remove(updateTexts)
if __name__ == "__main__":
register()
| gpl-2.0 | -5,689,746,719,977,277,000 | 41.466667 | 194 | 0.573052 | false |
City-of-Bloomington/green-rental | utility/migrations/0011_auto__chg_field_utilitysummary_statement.py | 2 | 25109 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UtilitySummary.statement'
db.alter_column(u'utility_utilitysummary', 'statement_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['utility.Statement'], null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'UtilitySummary.statement'
raise RuntimeError("Cannot reverse this migration. 'UtilitySummary.statement' and its values cannot be restored.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'building.building': {
'Meta': {'object_name': 'Building'},
'active_listings': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'air_conditioning': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'amenities': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'average_electricity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_gas': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_sqft': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_trash': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_water': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'bike_friendly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bike_friendly_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'bike_friendly_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'bike_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'built_year': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']"}),
'composting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'energy_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'energy_saving_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'energy_saving_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'energy_saving_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'energy_score': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'estimated_total_max': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'estimated_total_min': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'game_room': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'garden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'garden_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'garden_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'geocoder': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'gym': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heat_source_details': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20', 'blank': 'True'}),
'heat_source_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'laundry': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'max_rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'max_rent_listing': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'min_rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'min_rent_listing': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80', 'blank': 'True'}),
'number_of_units': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parcel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['building.Parcel']"}),
'parking_options': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'pets': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pets_options': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'pets_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'recycling': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'renewable_energy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'renewable_energy_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'renewable_energy_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'smart_living': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source.Source']", 'null': 'True', 'blank': 'True'}),
'sqft': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'total_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'transit_friendly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'transit_friendly_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'transit_friendly_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'transit_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'utility_data_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'walk_friendly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'walk_friendly_details': ('rentrocket.helpers.MultiSelectField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'walk_friendly_other': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'walk_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'who_pays_cable': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_electricity': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_gas': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_internet': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_trash': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'who_pays_water': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'})
},
u'building.parcel': {
'Meta': {'object_name': 'Parcel'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'from_st': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape': ('django.db.models.fields.TextField', [], {}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'street_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'to_st': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'building.unit': {
'Meta': {'ordering': "['number']", 'object_name': 'Unit'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'average_electricity': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_gas': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_trash': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'average_water': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'bathrooms': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bedrooms': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'units'", 'to': u"orm['building.Building']"}),
'deposit': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'electricity_max': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'electricity_min': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'energy_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'energy_score': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gas_max': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'gas_min': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_occupants': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'rent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'sqft': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20', 'blank': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'city.city': {
'Meta': {'object_name': 'City'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cutoffs': ('django.db.models.fields.CharField', [], {'default': "'50,250,500,1000'", 'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tag': ('django.db.models.fields.CharField', [], {'default': "'<django.db.models.fields.charfield>'", 'unique': 'True', 'max_length': '200'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'city'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'source.feedinfo': {
'Meta': {'object_name': 'FeedInfo'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'building_id_definition': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']"}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parcel_id_definition': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
u'source.source': {
'Meta': {'object_name': 'Source'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source.FeedInfo']", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['person.Person']", 'blank': 'True'})
},
u'utility.cityserviceprovider': {
'Meta': {'object_name': 'CityServiceProvider'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['city.City']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utility.ServiceProvider']"}),
'website': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'utility.serviceprovider': {
'Meta': {'object_name': 'ServiceProvider'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'utility.serviceutility': {
'Meta': {'object_name': 'ServiceUtility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'utilities'", 'to': u"orm['utility.ServiceProvider']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'utility.statement': {
'Meta': {'object_name': 'Statement'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'blob_key': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'processed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processed_statements'", 'null': 'True', 'to': u"orm['auth.User']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'electricity'", 'max_length': '12'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['building.Unit']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'utility.statementupload': {
'Meta': {'object_name': 'StatementUpload'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'blob_key': ('django.db.models.fields.TextField', [], {}),
'building_address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'city_tag': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'energy_sources': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'energy_strategy': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'move_in': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'person_email': ('django.db.models.fields.EmailField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'electricity'", 'max_length': '12'}),
'unit_details': ('jsonfield.fields.JSONField', [], {'blank': 'True'}),
'unit_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'utility.utilitysummary': {
'Meta': {'object_name': 'UtilitySummary'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'amount': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['building.Building']"}),
'cost': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utility.ServiceProvider']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source.Source']", 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'statement': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utility.Statement']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'electricity'", 'max_length': '12'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['building.Unit']"}),
'unit_of_measurement': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['utility'] | agpl-3.0 | -962,976,256,291,214,600 | 84.993151 | 195 | 0.54809 | false |
kmunve/processgpr | test/trace_db.py | 1 | 3048 | __author__ = 'kmu'
"""
Storing GPR traces in an SQLite database.
Check out:
http://stackoverflow.com/questions/18621513/python-insert-numpy-array-into-sqlite3-database
"""
import sqlite3
import numpy as np
import io
# Defining the trace object
class TraceDb():
def __init__(self, values, deltaT=0, Nfft=None):
"""
Init the TraceDb object.
:param values: a data array
:param deltaT: time step between samples in ns (future use)
:param Nfft: number of samples used for FFT (future use)
:return:
"""
self.values = values
self.deltaT = deltaT
self.direction = None # either up, down, cross_up, cross_down
self.quality = 0
def __str__(self):
"""
:return: string output of TraceDb object
"""
return "Direction: {0} [Q: {2}]\n{1}".format(self.direction, self.values, self.quality)
def set_direction(self, d):
"""
Sets the direction parameter
:param d: string equal to "up", "down", "cross_up", or "cross_down"
:return:
"""
directions = ["up", "down", "cross_up", "cross_down"]
if d in directions:
self.direction = d
else:
print "Choose one o the following directions:"
for direc in directions:
print direc
# Preparing sqlite to store and retrieve arrays
def adapt_array(arr):
out = io.BytesIO()
np.save(out, arr)
out.seek(0)
return buffer(out.read())
def convert_array(text):
out = io.BytesIO(text)
out.seek(0)
return np.load(out)
# Converts np.array to TEXT when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)
# Converts TEXT to np.array when selecting
sqlite3.register_converter("ARRAY", convert_array)
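# With these two registrations an np.ndarray passed as a query parameter is
# serialized via np.save on the way in, and any column declared with the ARRAY
# type is rebuilt with np.load on the way out -- provided the connection is
# opened with detect_types=sqlite3.PARSE_DECLTYPES, as done below.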
################
# Doing a test #
################
# Creating a test TraceDb object
val = np.exp(np.linspace(-1., -4.0, 50)) * np.sin(np.linspace(0.0, 10*np.pi, 50))
trace = TraceDb(val)
trace.set_direction("up")
# Creating and connecting to a database
con = sqlite3.connect("test.db", detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
# Removing an existing table with name "test"
cur.execute("DROP TABLE IF EXISTS test")
# Adding a table called "test" to the DB
cur.execute("create table test(Id INTEGER PRIMARY KEY AUTOINCREMENT, direction TEXT, val ARRAY, quality INT);")
# Inserting three traces to test ID-autoincrement
cur.execute("INSERT INTO test(direction, val, quality) VALUES(?, ?, ?)", (trace.direction, trace.values, trace.quality))
cur.execute("INSERT INTO test(direction, val, quality) VALUES(?, ?, ?)", (trace.direction, trace.values, trace.quality))
cur.execute("INSERT INTO test(direction, val, quality) VALUES(?, ?, ?)", (trace.direction, trace.values, trace.quality))
# Commiting the changes to the DB
con.commit()
# Retrieving data from the DB
cur.execute("select * from test")
data = cur.fetchone()[2]
# Check output
print(data)
print(type(data))
if np.array_equal(data, trace.values):
print("YES")
| gpl-2.0 | -6,204,422,218,226,688,000 | 25.973451 | 120 | 0.643045 | false |
sajeeshcs/nested_projects_keystone | keystone/trust/backends/kvs.py | 1 | 4491 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An in memory implementation of the trusts API.
only to be used for testing purposes
"""
import copy
from oslo.utils import timeutils
from keystone.common import kvs
from keystone import exception
from keystone.openstack.common import versionutils
from keystone import trust as keystone_trust
def _filter_trust(ref, deleted=False):
if ref['deleted_at'] and not deleted:
return None
if (ref.get('expires_at') and timeutils.utcnow() > ref['expires_at'] and
not deleted):
return None
remaining_uses = ref.get('remaining_uses')
# Do not return trusts that can't be used anymore
if remaining_uses is not None and not deleted:
if remaining_uses <= 0:
return None
ref = copy.deepcopy(ref)
return ref
class Trust(kvs.Base, keystone_trust.Driver):
@versionutils.deprecated(versionutils.deprecated.JUNO,
in_favor_of='keystone.trust.backends.sql',
remove_in=+1,
what='keystone.trust.backends.kvs')
def __init__(self):
super(Trust, self).__init__()
def create_trust(self, trust_id, trust, roles):
trust_ref = copy.deepcopy(trust)
trust_ref['id'] = trust_id
trust_ref['deleted_at'] = None
trust_ref['roles'] = roles
if (trust_ref.get('expires_at') and
trust_ref['expires_at'].tzinfo is not None):
trust_ref['expires_at'] = (timeutils.normalize_time
(trust_ref['expires_at']))
self.db.set('trust-%s' % trust_id, trust_ref)
trustee_user_id = trust_ref['trustee_user_id']
trustee_list = self.db.get('trustee-%s' % trustee_user_id, [])
trustee_list.append(trust_id)
self.db.set('trustee-%s' % trustee_user_id, trustee_list)
trustor_user_id = trust_ref['trustor_user_id']
trustor_list = self.db.get('trustor-%s' % trustor_user_id, [])
trustor_list.append(trust_id)
self.db.set('trustor-%s' % trustor_user_id, trustor_list)
return trust_ref
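    # Storage layout used by this backend (summarised from the code above):
    # the trust itself lives under 'trust-<trust_id>', while
    # 'trustee-<user_id>' and 'trustor-<user_id>' hold plain lists of trust
    # ids that the list_trusts_for_* methods below iterate over.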
def consume_use(self, trust_id):
try:
orig_ref = self.db.get('trust-%s' % trust_id)
except exception.NotFound:
raise exception.TrustNotFound(trust_id=trust_id)
remaining_uses = orig_ref.get('remaining_uses')
if remaining_uses is None:
# unlimited uses, do nothing
return
elif remaining_uses > 0:
ref = copy.deepcopy(orig_ref)
ref['remaining_uses'] -= 1
self.db.set('trust-%s' % trust_id, ref)
else:
raise exception.TrustUseLimitReached(trust_id=trust_id)
def get_trust(self, trust_id, deleted=False):
try:
ref = self.db.get('trust-%s' % trust_id)
return _filter_trust(ref, deleted=deleted)
except exception.NotFound:
return None
def delete_trust(self, trust_id):
try:
ref = self.db.get('trust-%s' % trust_id)
except exception.NotFound:
raise exception.TrustNotFound(trust_id=trust_id)
ref['deleted_at'] = timeutils.utcnow()
self.db.set('trust-%s' % trust_id, ref)
def list_trusts(self):
trusts = []
for key, value in self.db.items():
if key.startswith("trust-") and not value['deleted_at']:
trusts.append(value)
return trusts
def list_trusts_for_trustee(self, trustee_user_id):
trusts = []
for trust in self.db.get('trustee-%s' % trustee_user_id, []):
trusts.append(self.get_trust(trust))
return trusts
def list_trusts_for_trustor(self, trustor_user_id):
trusts = []
for trust in self.db.get('trustor-%s' % trustor_user_id, []):
trusts.append(self.get_trust(trust))
return trusts
| apache-2.0 | 2,008,816,630,022,249,500 | 36.115702 | 76 | 0.607882 | false |
SamWitte/Codds_DarkMatter | src/globalfnc.py | 1 | 23236 | """
Copyright (c) 2015 Andreea Georgescu
Created on Fri Nov 21 02:49:53 2014
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import math
from collections import OrderedDict
from scipy.special import erf, erfinv
from scipy.stats import chi2
pi = np.pi
T = True # short-hand notation
F = False
PRECISSION = 1.e-5
# Unit conversions
fermiGeV = 1./0.1973269602 # Natural[GeV femto Meter]
kilogram = 1e-9/1.782661758e-36 # kg in GeV (units: [GeV/kg])
SpeedOfLight = 299792.458 # km/s
AtomicMassUnit = 0.931494028
ProtonMass = 1.00727646677 * AtomicMassUnit
mPhiRef = 1000.
rho = 0.3 # GeV/cm**3
conversion_factor = rho * SpeedOfLight**2 * 1e5 * 3600 * 24
ConfidenceLevel = 0.9
v0bar = default_v0bar = 220.
vobs = default_vobs = default_v0bar + 12.
vesc = default_vesc = 533.
if False: # alternative velocities; kept for reference
v0bar = 230 - 3 * 24.4
vobs = v0bar + 12
vesc = 544 - 3 * 39
""" List of experiment names corresponding to each type of statistical analysis.
"""
MaximumGapLimit_exper = ["SuperCDMS",
"LUX2013zero", "LUX2013one", "LUX2013three", "LUX2013five", "LUX2013many",
"XENON10", "XENON100", "CDMSlite2013CoGeNTQ",
"LUX2016one", "LUX2016five", "LUX2016many","PandaX","CDMSlite2016", "Xenon1T",
"CDMS_Snolab_GeHV", "LZ","Darwin", "CDMS_Snolab_SiHV","CDMS_Snolab_GeiZip",
"PICO_500", "DarkSideG2","PICO_60"]
GaussianLimit_exper = ["KIMS2012", "PICASSO"]
BinnedSignal_exper = ["DAMA2010Na", "DAMA2010I"]
Crosses_exper = ["CDMSSi2012", "DAMA2010Na", "DAMA2010I"]
DAMALimit_exper = ["DAMA2010Na_TotRateLimit"]
Poisson_exper = ["SIMPLEModeStage2"]
EHImethod_exper = ["CDMSSi2012", "CDMSSiGeArtif", "CDMSSiArtif"]
EHI_Pois = ["3BinXe","3BinXe2"]
MOD_BAND = ["DAMA_2Bin"]
SHM_line = ["SHM_eta0", "SHM_eta1"]
Extended_Like = ["CDMSSi2012"]
Poisson_Like = ["SuperCDMSLessT5", "SuperCDMSLikelihood","LUX2016zero"]
Poisson_likelihood = ["SuperCDMSLessT5", "SuperCDMSLikelihood", "LUX2016zero"]
""" Colors for plotting.
"""
Color = {"SuperCDMS": 'peru',
"LUX2013zero": 'magenta', "LUX2013one": 'magenta', "LUX2013three": 'magenta',
"LUX2013five": 'magenta', "LUX2013many": 'magenta',
"XENON10": 'orange', "XENON100": 'royalblue',
"CDMSlite2013CoGeNTQ": 'cyan', "CDMSSi2012": 'red',
"CDMSSi2012_EHI": 'firebrick',
"KIMS2012": 'purple', "PICASSO": 'darkturquoise',
"DAMA2010Na_TotRateLimit": 'black',
"DAMA2010Na": 'green',
"DAMA2010I": 'green',
"SIMPLEModeStage2": 'saddlebrown',
"SHM_eta0": 'gray', "SHM_eta1": 'gray', "SuperCDMSLessT5": 'peru',
"SuperCDMSLikelihood": 'peru',
"LUX2016zero": 'navy', "LUX2016five": 'navy', "LUX2016many": 'navy', "PandaX":'darkorchid',
"LUX2016one": 'navy', "CDMSlite2016": 'green', "Xenon1T": 'royalblue',
"CDMS_Snolab_GeHV": 'darkturquoise',
"LZ": 'black',"Darwin":'black', "CDMS_Snolab_SiHV":'darkturquoise',
"CDMS_Snolab_GeiZip":'darkturquoise', "PICO_500":'black', "DarkSideG2":'black',"PICO_60":'green',
"3BinXe":'black',"3BinXe2":'black',
"DAMA_2Bin": 'black'}
""" Linestyles get cicled through for each experiment name.
"""
linestyles = ['-', '--', '-.', ':']
""" For some experiments the linestyles are fixed and customized, passed as dashes.
"""
line_dashes = {"LUX2013one": (8, 4, 3, 4, 3, 4),
"LUX2013three": (8, 4, 3, 4), "LUX2013five": (8, 4), "LUX2013many": None,
"SHM_eta0": (8, 4), "SHM_eta1": (3, 4), "SuperCDMSLessT5": (3, 4),
"SuperCDMSLikelihood": (8, 4, 3, 4, 3, 4),
"LUX2016five": (8, 4, 3, 4, 3, 4),
"LUX2016many": (8, 4, 3, 4),
"LUX2016one": (3,4) }
""" Legend names, in order of appearence in the legend for the corresponding experiments
that appear in the plot.
"""
legend_names = OrderedDict([("DAMA$_0$", ["DAMA2010Na_TotRateLimit"]),
("DAMA$_1$", ["DAMA2010Na", "DAMA2010I",
"DAMA2010Na DAMA2010I",
"DAMA2010I DAMA2010Na"]),
("CDMS-II-Si", ["CDMSSi2012", "CDMSSi2012_EHI"]),
("SuperCDMS", ["SuperCDMS"]),
("CDMSlite", ["CDMSlite2013CoGeNTQ"]),
("SIMPLE", ["SIMPLEModeStage2"]),
("XENON10", ["XENON10"]), ("XENON100", ["XENON100"]),
("Xenon1T", ["Xenon1T"]),
("LUX2013", ["LUX2013zero", "LUX2013one", "LUX2013three",
"LUX2013five", "LUX2013many"]),
("PICASSO", ["PICASSO"]), ("KIMS", ["KIMS2012"]),
("SHM $(\sigma_p = 10^{-40}\mathrm{ cm}^2)$",
["SHM_eta0", "SHM_eta1"]),
("LUX2016", ["LUX2016zero","LUX2016zero", "LUX2016five", "LUX2016many"]),
("PandaX", ["PandaX"]), ("CDMSlite2016", ["CDMSlite2016"]),
("CDMS_Snolab_GeHV", ["CDMS Snolab Ge HV"]),
("LZ", ["LZ"]),("Darwin", ["Darwin"]),
("CDMS_Snolab_SiHV", ["CDMS Snolab Si HV"]),
("CDMS_Snolab_GeiZip", ["CDMS Snolab Ge iZip"]),
("PICO_500", ["PICO(500L)"]),("DarkSideG2", ["DarkSide G2"]),
("PICO_60", ["PICO(60L)"]), ("3BinXe", ["3BinXe"]),
("3BinXe2", ["3BinXe2"]), ("DAMA_2Bin", ["DAMA_2Bin"])])
""" Transparency parameter for filling regions, depending on the quenching factor.
"""
transp_list = [0.6, 0.4]
transparency = {0.4: transp_list[0], 0.09: transp_list[0],
(0.4, 0.09): transp_list[0], (0.09, 0.4): transp_list[0],
0.3: transp_list[1], 0.06: transp_list[1],
(0.3, 0.06): transp_list[1], (0.06, 0.3): transp_list[1]}
def confidence_level(sigma):
return erf(sigma/np.sqrt(2))
def sigma_dev(CL):
return np.sqrt(2) * erfinv(CL)
def chi_squared(dof, CL=ConfidenceLevel):
return chi2.ppf(CL, dof)
def chi_squared1(CL=ConfidenceLevel):
# special case for dof = 1
return sigma_dev(CL)**2
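# Quick numerical check of the helpers above (standard Gaussian / chi-squared
# values, quoted approximately): confidence_level(1.645) ~ 0.90,
# sigma_dev(0.90) ~ 1.645 and chi_squared1(0.90) = chi_squared(1, 0.90) ~ 2.71,
# e.g.
#
#   chi_squared(2, 0.90)   # ~ 4.61, the 90% quantile of a chi-squared, 2 dof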
def import_file(full_path_to_module):
""" Imports Python module from file.
"""
import os
import sys
directory, module_name = os.path.split(full_path_to_module)
module_name = os.path.splitext(module_name)[0]
path = list(sys.path)
sys.path.insert(0, directory)
try:
module = __import__(module_name)
return module
finally:
sys.path[:] = path # restore
def FileNameTail(fp, fn, mPhi):
""" Gives a file name-tail that is added to each output file, to distinguish
between different input parameters.
"""
if mPhi == mPhiRef:
mPhi_string = ""
else:
mPhi_string = "_mPhi" + str(math.trunc(mPhi))
fnfp = fn/fp
fnfp_string = "_fnfp"
if fnfp == 1.:
fnfp_string = "_fnfp1"
elif abs(fnfp) < 1:
if fnfp < 0:
fnfp_string += "_neg0" + str(math.trunc(round(10 * abs(fnfp))))
else:
fnfp_string += "0" + str(math.trunc(round(10 * abs(fnfp))))
else:
if fnfp < 0:
fnfp_string += "_neg" + str(math.trunc(round(abs(fnfp))))
else:
fnfp_string += str(math.trunc(round(abs(fnfp))))
return mPhi_string + fnfp_string
def OutputDirectory(output_main_dir, scattering_type, mPhi, delta):
""" Gives the name of the output directory for the given input parameters.
Input:
output_main_dir: string, optional
Name of main output directory.
scattering_type: string
            'SI' for spin-independent, 'SDPS' for spin-dependent pseudo-scalar,
            'SDAV' for spin-dependent axial-vector.
mPhi: float
Mass of mediator.
delta: float
DM mass split.
Returns:
out_dir: string
Full path of output directory.
"""
out_dir = output_main_dir
if mPhi == mPhiRef:
out_dir += "Contact"
else:
out_dir += "LongRange"
out_dir += scattering_type + "_delta_"
if delta < 0:
out_dir += "neg"
out_dir += str(math.trunc(abs(delta))) + "/"
return out_dir
def Output_file_name(exper_name, scattering_type, mPhi, mx, fp, fn, delta, HALO_DEP,
filename_tail, OUTPUT_MAIN_DIR, quenching=None):
""" Gives the name of the output file name for the given input parameters.
Input:
exper_name: string
Name of experiment.
scattering_type: string
            'SI' for spin-independent, 'SDPS' for spin-dependent pseudo-scalar,
            'SDAV' for spin-dependent axial-vector.
mPhi: float
Mass of mediator.
mx: float
DM mass.
fp and fn: float
Couplings to proton and neutron.
delta: float
DM mass split.
confidence_levels: list
List of confidence levels.
HALO_DEP: bool
Whether the analysis is halo-dependent or halo-independent.
filename_tail: string
Tag to be added to the file name.
OUTPUT_MAIN_DIR: string
Name of main output directory.
quenching: float, optional
quenching factor, needed for experiments that can have multiple options.
"""
output_dir = OutputDirectory(OUTPUT_MAIN_DIR, scattering_type, mPhi, delta)
output_file_no_extension = "./" + output_dir + "UpperLimit_" + exper_name
if HALO_DEP:
output_file_no_extension += "_mxsigma"
if vesc != default_vesc:
output_file_no_extension += "_vesc" \
+ str(math.trunc(round(vesc)))
if vobs != default_vobs:
output_file_no_extension += "_vobs" \
+ str(math.trunc(round(vobs)))
else:
output_file_no_extension += "_mx_" + str(mx) + "GeV"
output_file_no_extension += FileNameTail(fp, fn, mPhi) + filename_tail
if quenching is not None:
output_file_no_extension += "_q" + str(quenching)
print(output_file_no_extension)
return output_file_no_extension
def MultiExper_Output_file_name(multiexper_input, scattering_type, mPhi, mx, fp, fn, delta,
filename_tail, OUTPUT_MAIN_DIR, quenching=None):
""" Gives the name of the output file name for the given input parameters.
Input:
multiexper_input: string
Names of experiments.
scattering_type: string
            'SI' for spin-independent, 'SDPS' for spin-dependent pseudo-scalar,
            'SDAV' for spin-dependent axial-vector.
mPhi: float
Mass of mediator.
mx: float
DM mass.
fp and fn: float
Couplings to proton and neutron.
delta: float
DM mass split.
confidence_levels: list
List of confidence levels.
filename_tail: string
Tag to be added to the file name.
OUTPUT_MAIN_DIR: string
Name of main output directory.
quenching: float, optional
quenching factor, needed for experiments that can have multiple options.
"""
output_dir = OutputDirectory(OUTPUT_MAIN_DIR, scattering_type, mPhi, delta)
output_file_no_extension = "./" + output_dir + "MultiExperiment_EHI_"
for x in range(0, multiexper_input.size):
output_file_no_extension += multiexper_input[x]
output_file_no_extension += "_mx_" + str(mx) + "GeV"
output_file_no_extension += FileNameTail(fp, fn, mPhi) + filename_tail
if quenching is not None:
output_file_no_extension += "_q" + str(quenching)
print(output_file_no_extension)
return output_file_no_extension
def Plot_file_name(HALO_DEP, scattering_type, mPhi, fp, fn, delta,
filename_tail, OUTPUT_MAIN_DIR, mx=None):
""" Gives the name of the plot file name for the given input parameters.
Input:
HALO_DEP: bool
Whether the analysis is halo-dependent or halo-independent.
scattering_type: string
            'SI' for spin-independent, 'SDPS' for spin-dependent pseudo-scalar,
            'SDAV' for spin-dependent axial-vector.
mPhi: float
Mass of mediator.
fp and fn: float
Couplings to proton and neutron.
delta: float
DM mass split.
filename_tail: string
Tag to be added to the file name.
OUTPUT_MAIN_DIR: string
Name of main output directory.
mx: float, optional
DM mass.
"""
out_dir = OUTPUT_MAIN_DIR + "PLOTS_"
if mPhi == mPhiRef:
scattering = "Contact"
else:
scattering = "LongRange"
scattering += scattering_type
out_dir += scattering + "_Py/"
if HALO_DEP:
file_name = out_dir + "HaloDep_"
else:
file_name = out_dir + "HaloIndep_"
file_name += scattering + "_delta_"
if delta < 0:
file_name += "neg"
file_name += str(math.trunc(abs(delta))) + FileNameTail(fp, fn, mPhi) + \
filename_tail + ".pdf"
return file_name
def Gaussian(x, mu, sigma):
""" Gaussian resolution function.
"""
return np.exp(-(x-mu)**2 / (2 * sigma**2)) / (np.sqrt(2 * pi) * sigma)
def GPoisson(x, nu, sigma):
""" Resolution function for the Xe experiment; it is a combination of
Poisson and Gaussian resolution.
nu is the expected # of events.
"""
eps = 1.e-4
n = 1
add = nu * np.exp(-(x-1.)**2 / (2 * sigma**2))
summation = 0.
nfact = 1 # factorial
try:
while add > eps * (summation+add):
#print("TEST", add, summation, x, nu, sigma)
summation += add
n += 1
nfact *= n
add = 1. * nu**n / nfact * np.exp(-(x-n)**2 / (2. * n * sigma**2)) / np.sqrt(n)
except TypeError:
return 0.
result = summation * np.exp(-nu) / np.sqrt(2 * np.pi) / sigma
#print("GPoisson: ", result)
return result
def HelmFF(ER, A, mT):
""" Helm Form Factor. See http://arxiv.org/abs/hep-ph/0608035.
Input:
ER: float
Recoil energy.
A: int
Target mass number.
mT: float
Target nuclide mass.
"""
q = np.sqrt(2e-6 * mT * ER)
s = 0.9
ha = 0.52
hc = 1.23 * A**(1./3) - 0.6 # half-density radius
cpa = 7./3. * (pi * ha)**2
rsq = hc**2 + cpa
r1tmp = rsq - 5. * s**2
r1 = list(map(lambda r, s: np.sqrt(r) if r > 0 else np.sqrt(s), r1tmp, rsq))
x = np.abs(q * r1 * fermiGeV)
y = q * s * fermiGeV
f = np.array(list(map(lambda i: 3.0 * (np.sin(i) - i * np.cos(i))/i**3 if i > 5.e-8
else 1. - i**2 * (-0.1 + i**2 *
(1./200. + i**2 * (-1./15120. + i**2/1330560.))), x)))
return f**2 * np.exp(-y**2)
def GaussianFFSD(ER, A, mT):
""" Gaussian Form Factor for spin-dependent interactions.
Input:
ER: float
Recoil energy.
A: int
Target mass number.
mT: float
Target nuclide mass.
"""
q = np.sqrt(2e-6 * mT * ER)
R = 0.92 * A**(1./3) + 2.68 - 0.78 * np.sqrt((A**(1./3) - 3.8)**2 + 0.2)
x = np.abs(q * R * fermiGeV)
return np.exp(-x**2 / 4.)
""" Form factor options are used to select the correct FF depending on the type of
interaction (spin-independent, spin-dependent with axial-vector or pseudo-scalar
coupling).
"""
FFSI_options = {'HelmFF': HelmFF,
}
FFSD_options = {'GaussianFFSD': GaussianFFSD,
}
FF_options = {'SI': FFSI_options,
'SDPS': FFSD_options,
'SDAV': FFSD_options,
}
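# Example lookup (an illustrative sketch, not code used elsewhere in this
# module): the form-factor function for a given interaction type is picked by
# name and evaluated per target isotope, e.g. with roughly xenon-like
# placeholder numbers
#
#   ff = FF_options['SI']['HelmFF']
#   suppression = ff(10., np.array([129, 131]), np.array([120.1, 121.9]))
#
# with the recoil energy in keV and the mass numbers A and nuclide masses mT
# (in GeV) given as arrays, since the implementations above iterate over the
# list of targets.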
def ERecoil_ratio(mT_1, mT_2, mx, quenching_1, quenching_2):
mu_1 = mT_1 * mx / (mT_1 + mx)
mu_2 = mT_2 * mx / (mT_2 + mx)
return mu_2**2 / mu_1**2 * mT_1 / mT_2 * quenching_2 / quenching_1
def VMin(ER, mT, mx, delta):
""" Minimum velocity for a given recoil energy.
Input:
ER: float
Recoil energy.
mT: float
Target nuclide mass.
mx: float
DM mass.
delta: float
DM mass split.
Returns:
vmin: float
"""
muT = mx * mT / (mx + mT)
return SpeedOfLight * 1.e-3 / np.sqrt(2. * ER * mT) * abs(delta + ER * mT / muT)
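# Worked example (illustrative numbers only, not tied to any experiment in
# this package): for elastic scattering (delta = 0) of a 10 GeV DM particle
# off a roughly xenon-like nuclide of 122 GeV, a 5 keV recoil requires
#
#   VMin(5., 122., 10., 0.)   # roughly 570 km/s
#
# so only the faster part of the halo can produce such a recoil.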
def VminDelta(mT, mx, delta):
muT = mx * mT / (mx + mT)
return SpeedOfLight / 500. * np.sqrt(delta / 2. / muT) if delta > 0 \
else np.array([0] * len(mT))
def ERecoilBranch(vmin, mT, mx, delta, sign):
""" Recoil energy for given vmin.
Input:
vmin: float
Minimum DM velocity vmin.
mT: float
Target nuclide mass.
mx: float
DM mass.
delta: float
DM mass split.
sign: +1 or -1
Corresponds to the upper and lower branch, respectively.
Returns:
ER: float
"""
muT = mx * mT / (mx + mT)
if delta > 0:
vdel = np.sqrt(2. * delta / muT)
else:
vdel = np.array([0.])
if vmin >= min(vdel):
return 1.e6 * muT**2 / (2.*mT) * \
(vmin / SpeedOfLight +
sign * np.sqrt((vmin / SpeedOfLight)**2 - 2.*delta / muT * 1.e-6))**2
else:
return 1.e6 * muT**2 / (2.*mT) * \
(vdel / SpeedOfLight +
sign * np.sqrt((vdel / SpeedOfLight)**2 - 2.*delta / muT * 1.e-6))**2
def dERecoildVmin(vmin, mT, mx, delta, sign):
""" Derivative of recoil energy ER with respect to velocity vmin
    Input:
        vmin: float
            Minimum DM velocity vmin.
mT: float
Target nuclide mass.
mx: float
DM mass.
delta: float
DM mass split.
sign: +1 or -1
Corresponds to the upper and lower branch, respectively.
Returns:
d ER/d vmin: float
"""
muT = mx * mT / (mx + mT)
if delta > 0:
vdel = np.sqrt(2. * delta / muT)
else:
vdel = np.array([0.])
if vmin != 0 and vmin >= min(vdel):
sqrt_factor = np.sqrt(1. - 2.*delta / (muT * vmin**2) * SpeedOfLight**2 * 1.e-6)
ret = sign * muT**2 * vmin / mT * (1. + sign * sqrt_factor)**2 / sqrt_factor
elif vmin == 0 and delta == 0:
ret = muT**2 * vmin / mT
elif vmin == 0 and delta < 0:
ret = sign * (- delta * muT) / (np.sqrt(- 2.0 * delta * SpeedOfLight**2 * 1.e-6 / muT) * mT)
else:
ret = 0.
return ret
def eta0Maxwellian(vmin, vobs, v0bar, vesc):
""" Velocity integral eta0 in a Standard Halo Model with Maxwellian velocity
distribution.
Input:
vmin: float
            Minimum DM velocity.
vobs: float
Observed velocity.
v0bar: float
Velocity dispersion.
vesc: float
            Escape velocity.
Returns:
eta0: float
"""
x = vmin/v0bar
y = vobs/v0bar
z = vesc/v0bar
erfz = erf(z)
sqrt_pi = np.sqrt(pi)
exp_z_sq = np.exp(-z**2)
exp_z_sq_z = np.exp(-z**2) * z
eta = list(map(lambda i: -2. * exp_z_sq / sqrt_pi - erf(i-y) / (2.*y) +
erf(i+y) / (2.*y)
if i + y <= z
else exp_z_sq * (i - y - z) / (sqrt_pi * y) - erf(i-y) / (2.*y) +
erfz / (2.*y)
if i - y <= z < i + y
else 0, x))
return eta / (-2. * exp_z_sq_z / sqrt_pi + erfz) / v0bar
def eta1Maxwellian(vmin, vobs, v0bar, vesc):
""" Same as eta0Maxwellian, but this is the modulation velocity integral
eta1 = d eta0 / d vobs * delta_v.
delta_v = v_Earth * cos(gamma) where the velocity of the Earth is
v_Earth = 30 km/s and is inclined at an angle of gamma = 60 deg wrt the
galactic plane.
Returns:
eta1: float
"""
x = vmin/v0bar
y = vobs/v0bar
z = vesc/v0bar
delta_v = 15. # 30 * cos(60 * pi / 180)
erfz = erf(z)
sqrt_pi = np.sqrt(pi)
exp_z_sq = np.exp(-z**2)
exp_z_sq_z = np.exp(-z**2) * z
erf_z = erf(z)
eta = list(map(lambda i: (np.exp(-(i+y)**2) + np.exp(-(i-y)**2)) / (sqrt_pi * y) +
(erf(i-y) - erf(i+y)) / (2 * y**2)
if i + y <= z
else exp_z_sq * (-i + z) / (sqrt_pi * y**2) +
np.exp(-(i-y)**2) / (sqrt_pi * y) + (erf(i-y) - erf_z) / (2 * y**2)
if i - y <= z < i + y
else 0, x))
return eta / (-2. * exp_z_sq_z / sqrt_pi + erfz) * delta_v / v0bar**2
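# Sketch of how the two halo integrals above are typically evaluated (grid and
# usage are assumptions for illustration, not taken from the analysis code):
#
#   vmin_grid = np.linspace(0., 800., 81)
#   eta0 = eta0Maxwellian(vmin_grid, default_vobs, default_v0bar, default_vesc)
#   eta1 = eta1Maxwellian(vmin_grid, default_vobs, default_v0bar, default_vesc)
#
# eta0 decreases with vmin and vanishes for vmin > vesc + vobs, while eta1 is
# the small annual-modulation piece proportional to delta_v.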
def MaximumGapC0scaled(x, mu_over_x):
""" Scaled probability C0 of the maximum gap size being smaller than a particular
value of x. Based on Maximum Gap Method (Eq 2 of PHYSICAL REVIEW D 66, 032005, 2002).
Input:
x: float
The size of the maximum gap in the random experiment.
mu_over_x: float
mu/x, where mu is the total expected number of events.
"""
if mu_over_x < 1.:
return 1.
elif 1. <= mu_over_x < 2.:
return 1. - np.exp(-x) * (1. + mu_over_x * x - x)
else:
l = np.array([((k - mu_over_x) * x)**(k-1) * np.exp(-k * x) / math.factorial(k) *
(x * (mu_over_x - k) + k)
for k in range(1, math.trunc(np.floor(mu_over_x)))])
return 1. - l.sum()
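# Usage sketch (hedged, purely illustrative numbers): in the maximum gap
# method the exclusion confidence for a proposed signal strength follows from
# the expected number of events x in the largest empty gap and the total
# expectation mu, e.g.
#
#   C0 = MaximumGapC0scaled(2.0, 5.0 / 2.0)   # x = 2, mu = 5
#
# and the signal strength is excluded at 90% CL once C0 reaches 0.9.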
def Rebin_data(data, error):
""" Rebins the data from multiple bins into a single bin.
Input:
data, error: ndarray
Lists containing the data and error from each bin.
Returns:
data_rebinned, error_rebinned: float
"""
data_rebinned = sum(data)
error_rebinned = np.sqrt(sum(error**2))
return data_rebinned, error_rebinned
if __name__ == "__main__":
vmin = np.linspace(1, 1000, 1000)
mx = 1.3
delta = -200
mT = np.array([65.134, 66.995, 67.9278, 68.8571, 70.7203])
ER_plus = np.array([ERecoilBranch(vmin, mT_i, mx, delta, 1) for mT_i in mT])
ER_minus = np.array([ERecoilBranch(vmin, mT_i, mx, delta, -1) for mT_i in mT])
import matplotlib.pyplot as plt
plt.close()
for ER in ER_plus:
plt.plot(vmin, ER)
for ER in ER_minus:
plt.plot(vmin, ER)
plt.plot(vmin, 2.7 * np.ones_like(vmin))
plt.plot(vmin, 3 * np.ones_like(vmin))
ER = np.linspace(2, 6, 100)
plt.plot(490 * np.ones_like(ER), ER)
plt.plot(495 * np.ones_like(ER), ER)
plt.show()
| gpl-2.0 | 4,614,329,665,938,111,000 | 34.366819 | 106 | 0.548072 | false |
armab/st2 | st2actions/st2actions/runners/pythonrunner.py | 1 | 7435 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import abc
import json
import uuid
import logging as stdlib_logging
import six
from eventlet.green import subprocess
from st2actions.runners import ActionRunner
from st2common.util.green.shell import run_command
from st2common import log as logging
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2common.constants.action import LIVEACTION_STATUS_TIMED_OUT
from st2common.constants.error_messages import PACK_VIRTUALENV_DOESNT_EXIST
from st2common.util.sandboxing import get_sandbox_path
from st2common.util.sandboxing import get_sandbox_python_path
from st2common.util.sandboxing import get_sandbox_python_binary_path
from st2common.util.sandboxing import get_sandbox_virtualenv_path
from st2common.constants.runners import PYTHON_RUNNER_DEFAULT_ACTION_TIMEOUT
__all__ = [
'get_runner',
'PythonRunner',
'Action'
]
LOG = logging.getLogger(__name__)
# constants to lookup in runner_parameters.
RUNNER_ENV = 'env'
RUNNER_TIMEOUT = 'timeout'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
WRAPPER_SCRIPT_NAME = 'python_action_wrapper.py'
WRAPPER_SCRIPT_PATH = os.path.join(BASE_DIR, WRAPPER_SCRIPT_NAME)
def get_runner():
return PythonRunner(str(uuid.uuid4()))
@six.add_metaclass(abc.ABCMeta)
class Action(object):
"""
Base action class other Python actions should inherit from.
"""
description = None
def __init__(self, config=None):
"""
:param config: Action config.
:type config: ``dict``
"""
self.config = config or {}
self.logger = self._set_up_logger()
@abc.abstractmethod
def run(self, **kwargs):
pass
def _set_up_logger(self):
"""
Set up a logger which logs all the messages with level DEBUG
and above to stderr.
"""
logger_name = 'actions.python.%s' % (self.__class__.__name__)
logger = logging.getLogger(logger_name)
console = stdlib_logging.StreamHandler()
console.setLevel(stdlib_logging.DEBUG)
formatter = stdlib_logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
logger.setLevel(stdlib_logging.DEBUG)
return logger
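# A minimal Action subclass is sketched below purely as an illustration of the
# base class above; the class name and its parameter are hypothetical and not
# part of st2 itself:
#
#   class EchoAction(Action):
#       def run(self, message):
#           self.logger.debug('Echoing message: %s', message)
#           return message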
class PythonRunner(ActionRunner):
def __init__(self, runner_id, timeout=PYTHON_RUNNER_DEFAULT_ACTION_TIMEOUT):
"""
:param timeout: Action execution timeout in seconds.
:type timeout: ``int``
"""
super(PythonRunner, self).__init__(runner_id=runner_id)
self._timeout = timeout
def pre_run(self):
        # TODO: This is awful, but the way "runner_parameters" and other variables get
# assigned on the runner instance is even worse. Those arguments should
# be passed to the constructor.
self._env = self.runner_parameters.get(RUNNER_ENV, {})
self._timeout = self.runner_parameters.get(RUNNER_TIMEOUT, self._timeout)
def run(self, action_parameters):
pack = self.get_pack_name()
serialized_parameters = json.dumps(action_parameters) if action_parameters else ''
virtualenv_path = get_sandbox_virtualenv_path(pack=pack)
python_path = get_sandbox_python_binary_path(pack=pack)
if virtualenv_path and not os.path.isdir(virtualenv_path):
format_values = {'pack': pack, 'virtualenv_path': virtualenv_path}
msg = PACK_VIRTUALENV_DOESNT_EXIST % format_values
raise Exception(msg)
if not self.entry_point:
raise Exception('Action "%s" is missing entry_point attribute' % (self.action.name))
args = [
python_path,
WRAPPER_SCRIPT_PATH,
'--pack=%s' % (pack),
'--file-path=%s' % (self.entry_point),
'--parameters=%s' % (serialized_parameters),
'--parent-args=%s' % (json.dumps(sys.argv[1:]))
]
# We need to ensure all the st2 dependencies are also available to the
# subprocess
env = os.environ.copy()
env['PATH'] = get_sandbox_path(virtualenv_path=virtualenv_path)
env['PYTHONPATH'] = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
# Include user provided environment variables (if any)
user_env_vars = self._get_env_vars()
env.update(user_env_vars)
# Include common st2 environment variables
st2_env_vars = self._get_common_action_env_variables()
env.update(st2_env_vars)
exit_code, stdout, stderr, timed_out = run_command(cmd=args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False,
env=env, timeout=self._timeout)
if timed_out:
error = 'Action failed to complete in %s seconds' % (self._timeout)
else:
error = None
if ACTION_OUTPUT_RESULT_DELIMITER in stdout:
split = stdout.split(ACTION_OUTPUT_RESULT_DELIMITER)
assert len(split) == 3
result = split[1].strip()
stdout = split[0] + split[2]
else:
result = None
try:
result = json.loads(result)
except:
pass
output = {
'stdout': stdout,
'stderr': stderr,
'exit_code': exit_code,
'result': result
}
if error:
output['error'] = error
if exit_code == 0:
status = LIVEACTION_STATUS_SUCCEEDED
elif timed_out:
status = LIVEACTION_STATUS_TIMED_OUT
else:
status = LIVEACTION_STATUS_FAILED
return (status, output, None)
def _get_env_vars(self):
"""
Return sanitized environment variables which will be used when launching
a subprocess.
:rtype: ``dict``
"""
# Don't allow user to override PYTHONPATH since this would break things
blacklisted_vars = ['pythonpath']
env_vars = {}
if self._env:
env_vars.update(self._env)
# Remove "blacklisted" environment variables
to_delete = []
for key, value in env_vars.items():
if key.lower() in blacklisted_vars:
to_delete.append(key)
for key in to_delete:
del env_vars[key]
return env_vars
| apache-2.0 | -366,287,523,733,597,700 | 32.795455 | 96 | 0.6308 | false |
arienchen/pytibrv | tests/python/test-events.py | 1 | 1925 | import datetime
import time
from pytibrv.Tibrv import *
import unittest
class EventTest(unittest.TestCase, TibrvMsgCallback):
@classmethod
def setUpClass(cls):
status = Tibrv.open()
if status != TIBRV_OK:
raise TibrvError(status)
@classmethod
def tearDownClass(cls):
Tibrv.close()
def callback(self, event: TibrvEvent, msg: TibrvMsg, closure):
print('RECV [{}] < {}'.format(msg.sendSubject, str(msg)))
self.msg_recv = msg
# detech from TIBRV, must destroy later
status = msg.detach()
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
def test_create(self):
tx = TibrvTx()
status = tx.create(None, None, None)
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
que = TibrvQueue()
status = que.create('TEST')
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
# Create an INBOX
subj = tx.inbox()
lst = TibrvListener()
status = lst.create(que, self, tx, subj)
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
disp = TibrvDispatcher()
status = disp.create(que)
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
m = TibrvMsg.create()
m.setStr('DATA', 'TEST')
print('')
self.msg_recv = None
status = tx.send(m, subj)
self.timeout = time.time() + 10000
while time.time() <= self.timeout:
if self.msg_recv is not None:
break
time.sleep(0.1)
#print('SLEEP...')
self.assertIsNotNone(self.msg_recv)
self.assertEqual(m.getStr('DATA'), self.msg_recv.getStr('DATA'))
del self.msg_recv
del m
del disp
del lst
del que
del tx
if __name__ == "__main__" :
unittest.main(verbosity=2)
| bsd-3-clause | -2,019,257,972,628,880,000 | 24.666667 | 72 | 0.57974 | false |
scrollback/kuma | vendor/packages/translate-toolkit/translate/tools/poswap.py | 7 | 3619 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Builds a new translation file with the target of the input language as
source language.
Ensure that the two po files correspond 100% to the same pot file before using
this.
To translate Kurdish (ku) through French::
po2swap -i fr/ -t ku -o fr-ku
To convert the fr-ku files back to en-ku::
po2swap --reverse -i fr/ -t fr-ku -o en-ku
See: http://translate.sourceforge.net/wiki/toolkit/poswap for further examples and
usage instructions
"""
from translate.storage import po
from translate.convert import convert
def swapdir(store):
"""Swap the source and target of each unit."""
for unit in store.units:
if unit.isheader():
continue
if not unit.target or unit.isfuzzy():
unit.target = unit.source
else:
unit.source, unit.target = unit.target, unit.source
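# Example of the swap performed above (illustrative): a unit with source
# "File" and target "Fichier" comes out with source "Fichier" and target
# "File"; untranslated or fuzzy units first get their source copied into the
# target, so the swapped file still carries the original text.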
def convertpo(inputpofile, outputpotfile, template, reverse=False):
"""reads in inputpofile, removes the header, writes to outputpotfile."""
inputpo = po.pofile(inputpofile)
templatepo = po.pofile(template)
if reverse:
swapdir(inputpo)
templatepo.makeindex()
header = inputpo.header()
if header:
inputpo.units = inputpo.units[1:]
for i, unit in enumerate(inputpo.units):
for location in unit.getlocations():
templateunit = templatepo.locationindex.get(location, None)
if templateunit and templateunit.source == unit.source:
break
else:
templateunit = templatepo.findunit(unit.source)
unit.othercomments = []
if unit.target and not unit.isfuzzy():
unit.source = unit.target
elif not reverse:
if inputpo.filename:
unit.addnote("No translation found in %s" % inputpo.filename, origin="programmer")
else:
unit.addnote("No translation found in the supplied source language", origin="programmer")
unit.target = ""
unit.markfuzzy(False)
if templateunit:
unit.addnote(templateunit.getnotes(origin="translator"))
unit.markfuzzy(templateunit.isfuzzy())
unit.target = templateunit.target
if unit.isobsolete():
del inputpo.units[i]
outputpotfile.write(str(inputpo))
return 1
def main(argv=None):
formats = {("po", "po"): ("po", convertpo), ("po", "pot"): ("po", convertpo), "po": ("po", convertpo)}
parser = convert.ConvertOptionParser(formats, usetemplates=True, description=__doc__)
parser.add_option("", "--reverse", dest="reverse", default=False, action="store_true",
help="reverse the process of intermediate language conversion")
parser.passthrough.append("reverse")
parser.run(argv)
if __name__ == '__main__':
main()
| mpl-2.0 | -4,705,156,967,958,186,000 | 35.555556 | 106 | 0.665653 | false |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/stats/tests/test_mstats_basic.py | 17 | 42696 | """
Tests for the stats.mstats module (support for masked arrays)
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from numpy.testing import TestCase, run_module_suite
from numpy.testing.decorators import skipif
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_raises)
class TestMquantiles(TestCase):
def test_mquantiles_limit_keyword(self):
# Regression test for Trac ticket #867
data = np.array([[6., 7., 1.],
[47., 15., 2.],
[49., 36., 3.],
[15., 39., 4.],
[42., 40., -999.],
[41., 41., -999.],
[7., -999., -999.],
[39., -999., -999.],
[43., -999., -999.],
[40., -999., -999.],
[36., -999., -999.]])
desired = [[19.2, 14.6, 1.45],
[40.0, 37.5, 2.5],
[42.8, 40.05, 3.55]]
quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
assert_almost_equal(quants, desired)
class TestGMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual = mstats.gmean(a)
desired = np.power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
assert_(not isinstance(desired1, ma.MaskedArray))
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual = mstats.gmean(a)
desired = np.power(1*2*3,1./3.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
@skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
def test_1D_float96(self):
a = ma.array((1,2,3,4), mask=(0,0,0,1))
actual_dt = mstats.gmean(a, dtype=np.float96)
desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)
assert_almost_equal(actual_dt, desired_dt, decimal=14)
assert_(actual_dt.dtype == desired_dt.dtype)
def test_2D(self):
a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
actual = mstats.gmean(a)
desired = np.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
desired1 = mstats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
actual = mstats.gmean(a, -1)
desired = ma.array((np.power(1*2*3*4,1./4.),
np.power(2*3,1./2.),
np.power(1*4,1./2.)))
assert_array_almost_equal(actual, desired, decimal=14)
class TestHMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual = mstats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.hmean(ma.array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual = mstats.hmean(a)
desired = 3. / (1./1 + 1./2 + 1./3)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
@skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
def test_1D_float96(self):
a = ma.array((1,2,3,4), mask=(0,0,0,1))
actual_dt = mstats.hmean(a, dtype=np.float96)
desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3),
dtype=np.float96)
assert_almost_equal(actual_dt, desired_dt, decimal=14)
assert_(actual_dt.dtype == desired_dt.dtype)
def test_2D(self):
a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
actual = mstats.hmean(a)
desired = ma.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
actual1 = mstats.hmean(a,axis=-1)
desired = (4./(1/1.+1/2.+1/3.+1/4.),
2./(1/2.+1/3.),
2./(1/1.+1/4.)
)
assert_array_almost_equal(actual1, desired, decimal=14)
class TestRanking(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def test_ranking(self):
x = ma.array([0,1,1,1,2,3,4,5,5,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,3,3,5,6,7,8.5,8.5,10])
x[[3,4]] = masked
assert_almost_equal(mstats.rankdata(x),
[1,2.5,2.5,0,0,4,5,6.5,6.5,8])
assert_almost_equal(mstats.rankdata(x, use_missing=True),
[1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
x = ma.array([0,1,5,1,2,4,3,5,1,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,8.5,3,5,7,6,8.5,3,10])
x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
assert_almost_equal(mstats.rankdata(x),
[[1,3,3,3,5], [6,7,8.5,8.5,10]])
assert_almost_equal(mstats.rankdata(x, axis=1),
[[1,3,3,3,5], [1,2,3.5,3.5,5]])
assert_almost_equal(mstats.rankdata(x,axis=0),
[[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr(TestCase):
def test_pearsonr(self):
# Tests some computations of Pearson's r
x = ma.arange(10)
with warnings.catch_warnings():
# The tests in this context are edge cases, with perfect
# correlation or anticorrelation, or totally masked data.
# None of these should trigger a RuntimeWarning.
warnings.simplefilter("error", RuntimeWarning)
assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
x = ma.array(x, mask=True)
pr = mstats.pearsonr(x, x)
assert_(pr[0] is masked)
assert_(pr[1] is masked)
x1 = ma.array([-1.0, 0.0, 1.0])
y1 = ma.array([0, 0, 3])
r, p = mstats.pearsonr(x1, y1)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
# (x2, y2) have the same unmasked data as (x1, y1).
mask = [False, False, False, True]
x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
y2 = ma.array([0, 0, 3, -1], mask=mask)
r, p = mstats.pearsonr(x2, y2)
assert_almost_equal(r, np.sqrt(3)/2)
assert_almost_equal(p, 1.0/3)
def test_spearmanr(self):
# Tests some computations of Spearman's rho
(x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
(x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
def test_kendalltau(self):
# Tests some computations of Kendall's tau
x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
[+0.3333333,0.4969059])
assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
[-0.5477226,0.2785987])
#
x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20,
10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27,
25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
result = mstats.kendalltau(x,y)
assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
def test_kendalltau_seasonal(self):
# Tests the seasonal Kendall tau.
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
output = mstats.kendalltau_seasonal(x)
assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
assert_almost_equal(output['seasonal p-value'].round(2),
[0.18,0.53,0.20,0.04])
def test_pointbiserial(self):
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1,-1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
class TestTrimming(TestCase):
def test_trim(self):
a = ma.arange(10)
assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
a = ma.arange(10)
assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
[None,None,None,3,4,5,6,7,None,None])
a = ma.arange(10)
assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
[None,1,2,3,4,5,6,7,None,None])
a = ma.arange(12)
a[[0,-1]] = a[5] = masked
assert_equal(mstats.trim(a, (2,8)),
[None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
x = ma.arange(100).reshape(10, 10)
expected = [1]*10 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx._mask.T.ravel(), expected)
# same as above, but with an extra masked row inserted
x = ma.arange(110).reshape(11, 10)
x[1] = masked
expected = [1]*20 + [0]*70 + [1]*20
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
assert_equal(trimx._mask.ravel(), expected)
trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
assert_equal(trimx.T._mask.ravel(), expected)
def test_trim_old(self):
x = ma.arange(100)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x,tail='r').count(), 80)
x[50:70] = masked
trimx = mstats.trimboth(x)
assert_equal(trimx.count(), 48)
assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
x._mask = nomask
x.shape = (10,10)
assert_equal(mstats.trimboth(x).count(), 60)
assert_equal(mstats.trimtail(x).count(), 80)
def test_trimmedmean(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
def test_trimmed_stde(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
def test_winsorization(self):
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
21551.4, 1)
data[5] = masked
winsorized = mstats.winsorize(data)
assert_equal(winsorized.mask, data.mask)
class TestMoments(TestCase):
# Comparison numbers are found using R v.1.5.1
# note that length(testcase) = 4
# testmathworks comes from documentation for the
# Statistics Toolbox for Matlab and can be found at both
# http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
# http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
# Note that both test cases came from here.
testcase = [1,2,3,4]
testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
np.nan])
testcase_2d = ma.array(
np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
[0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
[0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
[0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
[0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
mask=np.array([[True, False, False, True, False],
[True, True, True, False, True],
[False, False, False, False, False],
[True, True, True, True, True],
[False, False, True, False, False]], dtype=np.bool))
def test_moment(self):
y = mstats.moment(self.testcase,1)
assert_almost_equal(y,0.0,10)
y = mstats.moment(self.testcase,2)
assert_almost_equal(y,1.25)
y = mstats.moment(self.testcase,3)
assert_almost_equal(y,0.0)
y = mstats.moment(self.testcase,4)
assert_almost_equal(y,2.5625)
def test_variation(self):
y = mstats.variation(self.testcase)
assert_almost_equal(y,0.44721359549996, 10)
def test_skewness(self):
y = mstats.skew(self.testmathworks)
assert_almost_equal(y,-0.29322304336607,10)
y = mstats.skew(self.testmathworks,bias=0)
assert_almost_equal(y,-0.437111105023940,10)
y = mstats.skew(self.testcase)
assert_almost_equal(y,0.0,10)
def test_kurtosis(self):
# Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
# for compatibility with Matlab)
y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
assert_almost_equal(y, 2.1658856802973,10)
# Note that MATLAB has confusing docs for the following case
# kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
# kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
# The MATLAB docs imply that both should give Fisher's
y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0)
assert_almost_equal(y, 3.663542721189047,10)
y = mstats.kurtosis(self.testcase,0,0)
assert_almost_equal(y,1.64)
# test that kurtosis works on multidimensional masked arrays
correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
-1.26979517952]),
mask=np.array([False, False, False, True,
False], dtype=np.bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
correct_2d)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
correct_2d_bias_corrected = ma.array(
np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
mask=np.array([False, False, False, True, False], dtype=np.bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
bias=False),
correct_2d_bias_corrected)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row, bias=False),
correct_2d_bias_corrected[i])
# Check consistency between stats and mstats implementations
assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
stats.kurtosis(self.testcase_2d[2, :]))
def test_mode(self):
a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
a2 = np.reshape(a1, (3,5))
a3 = np.array([1,2,3,4,5,6])
a4 = np.reshape(a3, (3,2))
ma1 = ma.masked_where(ma.array(a1) > 2, a1)
ma2 = ma.masked_where(a2 > 2, a2)
ma3 = ma.masked_where(a3 < 2, a3)
ma4 = ma.masked_where(ma.array(a4) < 2, a4)
assert_equal(mstats.mode(a1, axis=None), (3,4))
assert_equal(mstats.mode(a1, axis=0), (3,4))
assert_equal(mstats.mode(ma1, axis=None), (0,3))
assert_equal(mstats.mode(a2, axis=None), (3,4))
assert_equal(mstats.mode(ma2, axis=None), (0,3))
assert_equal(mstats.mode(a3, axis=None), (1,1))
assert_equal(mstats.mode(ma3, axis=None), (2,1))
assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))
class TestPercentile(TestCase):
def setUp(self):
self.a1 = [3,4,5,10,-3,-5,6]
self.a2 = [3,-6,-2,8,7,4,2,1]
self.a3 = [3.,4,5,10,-3,-5,-6,7.0]
def test_percentile(self):
x = np.arange(8) * 0.5
assert_equal(mstats.scoreatpercentile(x, 0), 0.)
assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
assert_equal(mstats.scoreatpercentile(x, 50), 1.75)
def test_2D(self):
x = ma.array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
assert_equal(mstats.scoreatpercentile(x,50), [1,1,1])
class TestVariability(TestCase):
""" Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
"""
testcase = ma.fix_invalid([1,2,3,4,np.nan])
def test_signaltonoise(self):
# This is not in R, so used:
# mean(testcase, axis=0) / (sqrt(var(testcase)*3/4))
y = mstats.signaltonoise(self.testcase)
assert_almost_equal(y, 2.236067977)
def test_sem(self):
# This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
y = mstats.sem(self.testcase)
assert_almost_equal(y, 0.6454972244)
n = self.testcase.count()
assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
mstats.sem(self.testcase, ddof=2))
def test_zmap(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zmap(self.testcase, self.testcase)
desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired_unmaskedvals,
y.data[y.mask == False], decimal=12)
def test_zscore(self):
# This is not in R, so tested by using:
# (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
y = mstats.zscore(self.testcase)
desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
0.44721359549996, 1.3416407864999, np.nan])
assert_almost_equal(desired, y, decimal=12)
class TestMisc(TestCase):
def test_obrientransform(self):
args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
[6]+[7]*2+[8]*4+[9]*9+[10]*16]
result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
[10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),
result,4)
def test_kstwosamp(self):
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
(winter,spring,summer,fall) = x.T
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4),
(0.1818,0.9892))
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4),
(0.1469,0.7734))
assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4),
(0.1818,0.6744))
def test_friedmanchisq(self):
# No missing values
args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
[7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
[6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
result = mstats.friedmanchisquare(*args)
assert_almost_equal(result[0], 10.4737, 4)
assert_almost_equal(result[1], 0.005317, 6)
# Missing values
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x)
result = mstats.friedmanchisquare(*x)
assert_almost_equal(result[0], 2.0156, 4)
assert_almost_equal(result[1], 0.5692, 4)
def test_regress_simple():
# Regress a line with sinusoidal noise. Test for #1273.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y)
assert_almost_equal(slope, 0.19644990055858422)
assert_almost_equal(intercept, 10.211269918932341)
def test_theilslopes():
# Test for basic slope and intercept.
slope, intercept, lower, upper = mstats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test for correct masking.
y = np.ma.array([0,1,100,1], mask=[False, False, True, False])
slope, intercept, lower, upper = mstats.theilslopes(y)
assert_almost_equal(slope, 1./3)
assert_almost_equal(intercept, 2./3)
# Test of confidence intervals from example in Sen (1968).
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
def test_plotting_positions():
# Regression test for #1256
pos = mstats.plotting_positions(np.arange(3), 0, 0)
assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
def test_vs_nonmasked(self):
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_array_almost_equal(mstats.normaltest(x),
stats.normaltest(x))
assert_array_almost_equal(mstats.skewtest(x),
stats.skewtest(x))
assert_array_almost_equal(mstats.kurtosistest(x),
stats.kurtosistest(x))
funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
x = [1, 2, 3, 4]
for func, mfunc in zip(funcs, mfuncs):
assert_raises(ValueError, func, x)
assert_raises(ValueError, mfunc, x)
def test_axis_None(self):
# Test axis=None (equal to axis=0 for 1-D input)
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
assert_allclose(mstats.kurtosistest(x, axis=None),
mstats.kurtosistest(x))
def test_maskedarray_input(self):
# Add some masked values, test result doesn't change
x = np.array((-2,-1,0,1,2,3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10],
mask=np.r_[True, [False] * x.size, True])
assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
def test_nd_input(self):
x = np.array((-2,-1,0,1,2,3)*4)**2
x_2d = np.vstack([x] * 2).T
for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
res_1d = func(x)
res_2d = func(x_2d)
assert_allclose(res_2d[0], [res_1d[0]] * 2)
assert_allclose(res_2d[1], [res_1d[1]] * 2)
#TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_invalid_input_size(self):
assert_raises(ValueError, mstats.ttest_rel,
np.arange(10), np.arange(11))
x = np.arange(24)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
def test_empty(self):
res1 = mstats.ttest_rel([], [])
assert_(np.all(np.isnan(res1)))
class TestTtest_ind():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_empty(self):
res1 = mstats.ttest_ind([], [])
assert_(np.all(np.isnan(res1)))
class TestTtest_1samp():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], 1)
res2 = mstats.ttest_1samp(outcome[:, 0], 1)
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_empty(self):
res1 = mstats.ttest_1samp([], 1)
assert_(np.all(np.isnan(res1)))
class TestCompareWithStats(TestCase):
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these tests,
but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
        # Generate numpy arrays and corresponding masked arrays holding the
        # same data, plus extra masked (sentinel) values appended at the end.
np.random.seed(1234567)
x = np.random.randn(n)
y = x + np.random.randn(n)
xm = np.ones(len(x) + 5) * 1e16
ym = np.ones(len(y) + 5) * 1e16
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
x = np.ones((n, nx)) * np.nan
y = np.ones((n, nx)) * np.nan
xm = np.ones((n+5, nx)) * np.nan
ym = np.ones((n+5, nx)) * np.nan
for i in range(nx):
x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.linregress(x, y)
res2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(res1), np.asarray(res2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_signaltonoise(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.signaltonoise(x)
rm = stats.mstats.signaltonoise(xm)
assert_almost_equal(r, rm, 10)
r = stats.signaltonoise(y)
rm = stats.mstats.signaltonoise(ym)
assert_almost_equal(r, rm, 10)
def test_betai(self):
np.random.seed(12345)
for i in range(10):
a = np.random.rand() * 5.
b = np.random.rand() * 200.
assert_equal(stats.betai(a, b, 0.), 0.)
assert_equal(stats.betai(a, b, 1.), 1.)
assert_equal(stats.mstats.betai(a, b, 0.), 0.)
assert_equal(stats.mstats.betai(a, b, 1.), 1.)
x = np.random.rand()
assert_almost_equal(stats.betai(a, b, x),
stats.mstats.betai(a, b, x), decimal=13)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
#reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
#validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
#compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5,4)
am = np.ma.array(a)
r = stats.sem(a,ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x),stats.mstats.tmin(xm))
assert_equal(stats.tmin(y),stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x,lowerlimit=-1.),
stats.mstats.tmin(xm,lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y,lowerlimit=-1.),
stats.mstats.tmin(ym,lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x,y)
zm = stats.mstats.zmap(xm,ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(b, bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14)
assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14)
assert_almost_equal(stats.tsem(x,limits=(-2.,2.)),
stats.mstats.tsem(xm,limits=(-2.,2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r[0], rm[0], rtol=1e-15)
                # TODO: the p-value comparison is skipped because of a known
                # issue: mstats returns a slightly different p-value here. What
                # is a bit strange is that other tests, such as
                # test_maskedarray_input, do not fail.
                #~ assert_almost_equal(r[1], rm[1])
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
x = np.random.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_equal(r[0][0],rm[0][0])
assert_equal(r[0][1],rm[0][1])
def test_normaltest(self):
np.seterr(over='raise')
for n in self.get_n():
if n > 8:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float')
tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
r = stats.find_repeats(x)
rm = stats.mstats.find_repeats(xm)
assert_equal(r,rm)
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
if __name__ == "__main__":
run_module_suite()
| apache-2.0 | 1,844,709,644,750,876,400 | 39.470142 | 90 | 0.53546 | false |
wunderlins/learning | python/zodb/lib/osx/persistent/tests/test_mapping.py | 2 | 7118 | ##############################################################################
#
# Copyright (c) Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
class Test_default(unittest.TestCase):
def _getTargetClass(self):
from persistent.mapping import default
return default
def _makeOne(self, func):
return self._getTargetClass()(func)
def test___get___from_class(self):
_called_with = []
def _test(inst):
_called_with.append(inst)
return '_test'
descr = self._makeOne(_test)
class Foo(object):
testing = descr
self.assertTrue(Foo.testing is descr)
self.assertEqual(_called_with, [])
def test___get___from_instance(self):
_called_with = []
def _test(inst):
_called_with.append(inst)
return 'TESTING'
descr = self._makeOne(_test)
class Foo(object):
testing = descr
foo = Foo()
self.assertEqual(foo.testing, 'TESTING')
self.assertEqual(_called_with, [foo])
class PersistentMappingTests(unittest.TestCase):
def _getTargetClass(self):
from persistent.mapping import PersistentMapping
return PersistentMapping
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_volatile_attributes_not_persisted(self):
# http://www.zope.org/Collectors/Zope/2052
m = self._makeOne()
m.foo = 'bar'
m._v_baz = 'qux'
state = m.__getstate__()
self.assertTrue('foo' in state)
self.assertFalse('_v_baz' in state)
def testTheWorld(self):
from persistent._compat import PYTHON2
# Test constructors
l0 = {}
l1 = {0:0}
l2 = {0:0, 1:1}
u = self._makeOne()
u0 = self._makeOne(l0)
u1 = self._makeOne(l1)
u2 = self._makeOne(l2)
uu = self._makeOne(u)
uu0 = self._makeOne(u0)
uu1 = self._makeOne(u1)
uu2 = self._makeOne(u2)
class OtherMapping(dict):
def __init__(self, initmapping):
self.__data = initmapping
def items(self):
return self.__data.items()
v0 = self._makeOne(OtherMapping(u0))
vv = self._makeOne([(0, 0), (1, 1)])
# Test __repr__
eq = self.assertEqual
eq(str(u0), str(l0), "str(u0) == str(l0)")
eq(repr(u1), repr(l1), "repr(u1) == repr(l1)")
# Test __cmp__ and __len__
if PYTHON2:
def mycmp(a, b):
r = cmp(a, b)
if r < 0: return -1
if r > 0: return 1
return r
all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
eq(mycmp(a, b), mycmp(len(a), len(b)),
"mycmp(a, b) == mycmp(len(a), len(b))")
# Test __getitem__
for i in range(len(u2)):
eq(u2[i], i, "u2[i] == i")
# Test get
for i in range(len(u2)):
eq(u2.get(i), i, "u2.get(i) == i")
eq(u2.get(i, 5), i, "u2.get(i, 5) == i")
for i in min(u2)-1, max(u2)+1:
eq(u2.get(i), None, "u2.get(i) == None")
eq(u2.get(i, 5), 5, "u2.get(i, 5) == 5")
# Test __setitem__
uu2[0] = 0
uu2[1] = 100
uu2[2] = 200
# Test __delitem__
del uu2[1]
del uu2[0]
try:
del uu2[0]
except KeyError:
pass
else:
            # ``TestFailed`` is not defined in this module; use the standard
            # unittest failure helper instead (as the pop test below does).
            self.fail("uu2[0] shouldn't be deletable")
# Test __contains__
for i in u2:
self.assertTrue(i in u2, "i in u2")
for i in min(u2)-1, max(u2)+1:
self.assertTrue(i not in u2, "i not in u2")
# Test update
l = {"a":"b"}
u = self._makeOne(l)
u.update(u2)
for i in u:
self.assertTrue(i in l or i in u2, "i in l or i in u2")
for i in l:
self.assertTrue(i in u, "i in u")
for i in u2:
self.assertTrue(i in u, "i in u")
# Test setdefault
x = u2.setdefault(0, 5)
eq(x, 0, "u2.setdefault(0, 5) == 0")
x = u2.setdefault(5, 5)
eq(x, 5, "u2.setdefault(5, 5) == 5")
self.assertTrue(5 in u2, "5 in u2")
# Test pop
x = u2.pop(1)
eq(x, 1, "u2.pop(1) == 1")
self.assertTrue(1 not in u2, "1 not in u2")
try:
u2.pop(1)
except KeyError:
pass
else:
self.fail("1 should not be poppable from u2")
x = u2.pop(1, 7)
eq(x, 7, "u2.pop(1, 7) == 7")
# Test popitem
items = list(u2.items())
key, value = u2.popitem()
self.assertTrue((key, value) in items, "key, value in items")
self.assertTrue(key not in u2, "key not in u2")
# Test clear
u2.clear()
eq(u2, {}, "u2 == {}")
def test___repr___converts_legacy_container_attr(self):
# In the past, PM used a _container attribute. For some time, the
# implementation continued to use a _container attribute in pickles
# (__get/setstate__) to be compatible with older releases. This isn't
# really necessary any more. In fact, releases for which this might
# matter can no longer share databases with current releases. Because
# releases as recent as 3.9.0b5 still use _container in saved state, we
# need to accept such state, but we stop producing it.
pm = self._makeOne()
self.assertEqual(pm.__dict__, {'data': {}})
# Make it look like an older instance
pm.__dict__.clear()
pm.__dict__['_container'] = {'a': 1}
self.assertEqual(pm.__dict__, {'_container': {'a': 1}})
pm._p_changed = 0
self.assertEqual(repr(pm), "{'a': 1}")
self.assertEqual(pm.__dict__, {'data': {'a': 1}})
self.assertEqual(pm.__getstate__(), {'data': {'a': 1}})
class Test_legacy_PersistentDict(unittest.TestCase):
def _getTargetClass(self):
from persistent.dict import PersistentDict
return PersistentDict
def test_PD_is_alias_to_PM(self):
from persistent.mapping import PersistentMapping
self.assertTrue(self._getTargetClass() is PersistentMapping)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(Test_default),
unittest.makeSuite(PersistentMappingTests),
unittest.makeSuite(Test_legacy_PersistentDict),
))
| gpl-2.0 | -3,446,693,615,814,522,400 | 29.033755 | 79 | 0.519949 | false |
nbborlongan/geonode | geonode/context_processors.py | 5 | 3018 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from geonode import get_version
from geonode.catalogue import default_catalogue_backend
from django.contrib.sites.models import Site
def resource_urls(request):
"""Global values to pass to templates"""
site = Site.objects.get_current()
defaults = dict(
STATIC_URL=settings.STATIC_URL,
CATALOGUE_BASE_URL=default_catalogue_backend()['URL'],
REGISTRATION_OPEN=settings.REGISTRATION_OPEN,
VERSION=get_version(),
SITE_NAME=site.name,
SITE_DOMAIN=site.domain,
RESOURCE_PUBLISHING=settings.RESOURCE_PUBLISHING,
THEME_ACCOUNT_CONTACT_EMAIL=settings.THEME_ACCOUNT_CONTACT_EMAIL,
DEBUG_STATIC=getattr(
settings,
"DEBUG_STATIC",
False),
PROXY_URL=getattr(
settings,
'PROXY_URL',
'/proxy/?url='),
SOCIAL_BUTTONS=getattr(
settings,
'SOCIAL_BUTTONS',
False),
HAYSTACK_SEARCH=getattr(
settings,
'HAYSTACK_SEARCH',
False),
SKIP_PERMS_FILTER=getattr(
settings,
'SKIP_PERMS_FILTER',
False),
HAYSTACK_FACET_COUNTS=getattr(
settings,
'HAYSTACK_FACET_COUNTS',
False),
CLIENT_RESULTS_LIMIT=getattr(
settings,
'CLIENT_RESULTS_LIMIT',
10),
LICENSES_ENABLED=getattr(
settings,
'LICENSES',
dict()).get(
'ENABLED',
False),
LICENSES_DETAIL=getattr(
settings,
'LICENSES',
dict()).get(
'DETAIL',
'never'),
LICENSES_METADATA=getattr(
settings,
'LICENSES',
dict()).get(
'METADATA',
'never'),
USE_NOTIFICATIONS=('notification' in settings.INSTALLED_APPS),
DEFAULT_ANONYMOUS_VIEW_PERMISSION=getattr(settings, 'DEFAULT_ANONYMOUS_VIEW_PERMISSION', False),
DEFAULT_ANONYMOUS_DOWNLOAD_PERMISSION=getattr(settings, 'DEFAULT_ANONYMOUS_DOWNLOAD_PERMISSION', False),
)
return defaults
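# A hedged configuration sketch: a context processor like this is activated by
# listing its dotted path in the Django settings. The exact setting name below
# is an assumption that depends on the Django version in use (older releases
# use TEMPLATE_CONTEXT_PROCESSORS, newer ones the TEMPLATES option):
#
#     TEMPLATE_CONTEXT_PROCESSORS += (
#         'geonode.context_processors.resource_urls',
#     )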
| gpl-3.0 | 7,496,288,331,391,082,000 | 32.910112 | 112 | 0.576541 | false |
v-iam/azure-sdk-for-python | azure-batch/azure/batch/models/pool_disable_auto_scale_options.py | 3 | 1719 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolDisableAutoScaleOptions(Model):
"""Additional parameters for the Pool_disable_auto_scale operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
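# A hedged usage sketch (the exact client call is an assumption; generated
# operation signatures vary between azure-batch SDK releases):
#
#     options = PoolDisableAutoScaleOptions(timeout=60,
#                                           return_client_request_id=True)
#     # batch_client.pool.disable_auto_scale(
#     #     'mypoolid', pool_disable_auto_scale_options=options)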
| mit | -1,907,633,428,260,927,500 | 44.236842 | 106 | 0.663758 | false |
hepix-virtualisation/vmcatcher | vmcatcher/vmcatcher_subscribe/retrieveHttp.py | 1 | 1229 | import retrieveBase
import logging
import httplib
import socket
import base64
timeout = 60
class retrieve(retrieveBase.retrieve):
def __init__(self, *args, **kwargs):
retrieveBase.retrieve.__init__(self,args,kwargs)
self.port_default = 80
def requestAsString(self):
        output = {'code' : 0}
        con = httplib.HTTPConnection(self.server, self.port, True, timeout)
        headers = {"User-Agent": "vmcatcher"}
        if (self.username is not None) and (self.password is not None):
            # Only send an HTTP Basic Authorization header when credentials
            # were supplied.
            auth = base64.standard_b64encode("%s:%s" % (self.username, self.password))
            headers["Authorization"] = "Basic %s" % (auth)
try:
con.request("GET" , self.path, headers=headers)
except socket.gaierror as expt:
output['error'] = expt.strerror
output['code'] = 404
return output
responce = con.getresponse()
httpstatus = responce.status
if httpstatus == 200:
output['responce'] = responce.read()
else:
output['error'] = responce.reason
output['code'] = httpstatus
return output
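# A hedged usage sketch; the constructor arguments are assumptions based on the
# attributes referenced above (server, port, path, username, password), not a
# documented interface:
#
#     r = retrieve(server='example.org', path='/image.list',
#                  username='user', password='secret')
#     result = r.requestAsString()
#     if result['code'] == 0:
#         body = result['responce']    # key spelling follows the code above
#     else:
#         print("HTTP error %s: %s" % (result['code'], result.get('error')))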
| apache-2.0 | -3,026,290,837,146,958,300 | 35.147059 | 86 | 0.593979 | false |