repo_name (string, 6–100 chars) | path (string, 4–294 chars) | copies (string, 1–5 chars) | size (string, 4–6 chars) | content (string, 606–896k chars) | license (string, 15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5–99.8) | line_max (int64, 13–999) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
nickpack/reportlab | src/reportlab/pdfbase/pdfform.py | 3 | 17084 |
"""Support for Acrobat Forms in ReportLab documents
This module is somewhat experimental at this time.
Includes basic support for
textfields,
select fields (drop down lists), and
check buttons.
The public interface consists of functions at the moment.
At some later date these operations may be made into canvas
methods. (comments?)
The ...Absolute(...) functions position the fields with respect
to the absolute canvas coordinate space -- that is, they do not
respect any coordinate transforms in effect for the canvas.
The ...Relative(...) functions position ONLY THE LOWER LEFT
CORNER of the field using the coordinate transform in effect for
the canvas. THIS WILL ONLY WORK CORRECTLY FOR TRANSLATED COORDINATES
-- THE SHAPE, SIZE, FONTSIZE, AND ORIENTATION OF THE FIELD WILL NOT BE AFFECTED
BY SCALING, ROTATION, SKEWING OR OTHER NON-TRANSLATION COORDINATE
TRANSFORMS.
Please note that all field names (titles) in a given document must be unique.
Textfields and select fields only support the "base 14" canvas fonts
at this time.
See individual function docstrings below for more information.
The function test1(...) generates a simple test file.
THIS CONTRIBUTION WAS COMMISSIONED BY REPORTLAB USERS
WHO WISH TO REMAIN ANONYMOUS.
"""
### NOTE: MAKE THE STRING FORMATS DYNAMIC IN PATTERNS TO SUPPORT ENCRYPTION XXXX
import string
from reportlab.pdfbase.pdfdoc import LINEEND, PDFString, PDFStream, PDFDictionary, PDFName
from reportlab.lib.colors import obj_R_G_B
#==========================public interfaces
def textFieldAbsolute(canvas, title, x, y, width, height, value="", maxlen=1000000, multiline=0):
"""Place a text field on the current page
with name title at ABSOLUTE position (x,y) with
dimensions (width, height), using value as the default value and
maxlen as the maximum permissible length. If multiline is set make
it a multiline field.
"""
theform = getForm(canvas)
return theform.textField(canvas, title, x, y, x+width, y+height, value, maxlen, multiline)
def textFieldRelative(canvas, title, xR, yR, width, height, value="", maxlen=1000000, multiline=0):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return textFieldAbsolute(canvas, title, xA, yA, width, height, value, maxlen, multiline)
def buttonFieldAbsolute(canvas, title, value, x, y):
"""Place a check button field on the current page
with name title and default value value (one of "Yes" or "Off")
at ABSOLUTE position (x,y).
"""
theform = getForm(canvas)
return theform.buttonField(canvas, title, value, x, y)
def buttonFieldRelative(canvas, title, value, xR, yR):
"same as buttonFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return buttonFieldAbsolute(canvas, title, value, xA, yA)
def selectFieldAbsolute(canvas, title, value, options, x, y, width, height):
"""Place a select field (drop down list) on the current page
with name title and
with options listed in the sequence options
default value value (must be one of options)
at ABSOLUTE position (x,y) with dimensions (width, height)."""
theform = getForm(canvas)
theform.selectField(canvas, title, value, options, x, y, x+width, y+height)
def selectFieldRelative(canvas, title, value, options, xR, yR, width, height):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return selectFieldAbsolute(canvas, title, value, options, xA, yA, width, height)
def test1():
from reportlab.pdfgen import canvas
fn = "formtest1.pdf"
c = canvas.Canvas(fn)
# first page
c.setFont("Courier", 10)
c.drawString(100, 500, "hello world")
textFieldAbsolute(c, "fieldA", 100, 600, 100, 20, "default value")
textFieldAbsolute(c, "fieldB", 100, 300, 100, 50, "another default value", multiline=1)
selectFieldAbsolute(c, "fieldC", "France", ["Canada", "France", "China"], 100, 200, 100, 20)
c.rect(100, 600, 100, 20)
buttonFieldAbsolute(c, "field2", "Yes", 100, 700)
c.rect(100, 700, 20, 20)
buttonFieldAbsolute(c, "field3", "Off", 100, 800)
c.rect(100, 800, 20, 20)
# second page
c.showPage()
c.setFont("Helvetica", 7)
c.translate(50, 20)
c.drawString(100, 500, "hello world")
textFieldRelative(c, "fieldA_1", 100, 600, 100, 20, "default value 2")
c.setStrokeColorRGB(1,0,0)
c.setFillColorRGB(0,1,0.5)
textFieldRelative(c, "fieldB_1", 100, 300, 100, 50, "another default value 2", multiline=1)
selectFieldRelative(c, "fieldC_1", "France 1", ["Canada 0", "France 1", "China 2"], 100, 200, 100, 20)
c.rect(100, 600, 100, 20)
buttonFieldRelative(c, "field2_1", "Yes", 100, 700)
c.rect(100, 700, 20, 20)
buttonFieldRelative(c, "field3_1", "Off", 100, 800)
c.rect(100, 800, 20, 20)
c.save()
print "wrote", fn
#==========================end of public interfaces
from pdfpattern import PDFPattern
def getForm(canvas):
"get form from canvas, create the form if needed"
try:
return canvas.AcroForm
except AttributeError:
theform = canvas.AcroForm = AcroForm()
# install the form in the document
d = canvas._doc
cat = d._catalog
cat.AcroForm = theform
return theform
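# Note: getForm() both caches the AcroForm instance on the canvas and attaches
# it to the document catalog, which is how the /AcroForm entry ends up in the
# generated PDF.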
class AcroForm:
__PDFObject__ = True
def __init__(self):
self.fields = []
def textField(self, canvas, title, xmin, ymin, xmax, ymax, value="", maxlen=1000000, multiline=0):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
font = canvas._fontname
fontsize = canvas._fontsize
field = TextField(title, value, xmin, ymin, xmax, ymax, page, maxlen,
font, fontsize, R, G, B, multiline)
self.fields.append(field)
canvas._addAnnotation(field)
def selectField(self, canvas, title, value, options, xmin, ymin, xmax, ymax):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
font = canvas._fontname
fontsize = canvas._fontsize
field = SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font=font, fontsize=fontsize, R=R, G=G, B=B)
self.fields.append(field)
canvas._addAnnotation(field)
def buttonField(self, canvas, title, value, xmin, ymin):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
field = ButtonField(title, value, xmin, ymin, page)
self.fields.append(field)
canvas._addAnnotation(field)
def format(self, document):
from reportlab.pdfbase.pdfdoc import PDFArray
proxy = PDFPattern(FormPattern, Resources=GLOBALRESOURCES, fields=PDFArray(self.fields))
return proxy.format(document)
FormPattern = [
'<<', LINEEND,
' /NeedAppearances true ', LINEEND,
' /DA ', PDFString('/Helv 0 Tf 0 g '), LINEEND,
' /DR ', LINEEND,
["Resources"],
' /Fields ', LINEEND,
["fields"],
'>>'
]
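# A note on the pattern lists used in this module: plain strings in a
# PDFPattern are emitted literally, while one-element lists such as
# ["Resources"] or ["fields"] appear to act as placeholders that are resolved
# from the keyword arguments given when the PDFPattern is constructed (see
# AcroForm.format above).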
def FormFontsDictionary():
from reportlab.pdfbase.pdfdoc import PDFDictionary
fontsdictionary = PDFDictionary()
fontsdictionary.__RefOnly__ = 1
for (fullname, shortname) in FORMFONTNAMES.items():
fontsdictionary[shortname] = FormFont(fullname, shortname)
fontsdictionary["ZaDb"] = ZADB
return fontsdictionary
def FormResources():
return PDFPattern(FormResourcesDictionaryPattern,
Encoding=ENCODING, Font=GLOBALFONTSDICTIONARY)
ZaDbPattern = [
' <<'
' /BaseFont'
' /ZapfDingbats'
' /Name'
' /ZaDb'
' /Subtype'
' /Type1'
' /Type'
' /Font'
'>>']
ZADB = PDFPattern(ZaDbPattern)
FormResourcesDictionaryPattern = [
'<<',
' /Encoding ',
["Encoding"], LINEEND,
' /Font ',
["Font"], LINEEND,
'>>'
]
FORMFONTNAMES = {
"Helvetica": "Helv",
"Helvetica-Bold": "HeBo",
'Courier': "Cour",
'Courier-Bold': "CoBo",
'Courier-Oblique': "CoOb",
'Courier-BoldOblique': "CoBO",
'Helvetica-Oblique': "HeOb",
'Helvetica-BoldOblique': "HeBO",
'Times-Roman': "Time",
'Times-Bold': "TiBo",
'Times-Italic': "TiIt",
'Times-BoldItalic': "TiBI",
}
EncodingPattern = [
'<<',
' /PDFDocEncoding ',
["PDFDocEncoding"], LINEEND,
'>>',
]
PDFDocEncodingPattern = [
'<<'
' /Differences'
' ['
' 24'
' /breve'
' /caron'
' /circumflex'
' /dotaccent'
' /hungarumlaut'
' /ogonek'
' /ring'
' /tilde'
' 39'
' /quotesingle'
' 96'
' /grave'
' 128'
' /bullet'
' /dagger'
' /daggerdbl'
' /ellipsis'
' /emdash'
' /endash'
' /florin'
' /fraction'
' /guilsinglleft'
' /guilsinglright'
' /minus'
' /perthousand'
' /quotedblbase'
' /quotedblleft'
' /quotedblright'
' /quoteleft'
' /quoteright'
' /quotesinglbase'
' /trademark'
' /fi'
' /fl'
' /Lslash'
' /OE'
' /Scaron'
' /Ydieresis'
' /Zcaron'
' /dotlessi'
' /lslash'
' /oe'
' /scaron'
' /zcaron'
' 160'
' /Euro'
' 164'
' /currency'
' 166'
' /brokenbar'
' 168'
' /dieresis'
' /copyright'
' /ordfeminine'
' 172'
' /logicalnot'
' /.notdef'
' /registered'
' /macron'
' /degree'
' /plusminus'
' /twosuperior'
' /threesuperior'
' /acute'
' /mu'
' 183'
' /periodcentered'
' /cedilla'
' /onesuperior'
' /ordmasculine'
' 188'
' /onequarter'
' /onehalf'
' /threequarters'
' 192'
' /Agrave'
' /Aacute'
' /Acircumflex'
' /Atilde'
' /Adieresis'
' /Aring'
' /AE'
' /Ccedilla'
' /Egrave'
' /Eacute'
' /Ecircumflex'
' /Edieresis'
' /Igrave'
' /Iacute'
' /Icircumflex'
' /Idieresis'
' /Eth'
' /Ntilde'
' /Ograve'
' /Oacute'
' /Ocircumflex'
' /Otilde'
' /Odieresis'
' /multiply'
' /Oslash'
' /Ugrave'
' /Uacute'
' /Ucircumflex'
' /Udieresis'
' /Yacute'
' /Thorn'
' /germandbls'
' /agrave'
' /aacute'
' /acircumflex'
' /atilde'
' /adieresis'
' /aring'
' /ae'
' /ccedilla'
' /egrave'
' /eacute'
' /ecircumflex'
' /edieresis'
' /igrave'
' /iacute'
' /icircumflex'
' /idieresis'
' /eth'
' /ntilde'
' /ograve'
' /oacute'
' /ocircumflex'
' /otilde'
' /odieresis'
' /divide'
' /oslash'
' /ugrave'
' /uacute'
' /ucircumflex'
' /udieresis'
' /yacute'
' /thorn'
' /ydieresis'
' ]'
' /Type'
' /Encoding'
'>>']
# global constant
PDFDOCENC = PDFPattern(PDFDocEncodingPattern)
# global constant
ENCODING = PDFPattern(EncodingPattern, PDFDocEncoding=PDFDOCENC)
def FormFont(BaseFont, Name):
from reportlab.pdfbase.pdfdoc import PDFName
return PDFPattern(FormFontPattern, BaseFont=PDFName(BaseFont), Name=PDFName(Name), Encoding=PDFDOCENC)
FormFontPattern = [
'<<',
' /BaseFont ',
["BaseFont"], LINEEND,
' /Encoding ',
["Encoding"], LINEEND,
' /Name ',
["Name"], LINEEND,
' /Subtype '
' /Type1 '
' /Type '
' /Font '
'>>' ]
# global constants
GLOBALFONTSDICTIONARY = FormFontsDictionary()
GLOBALRESOURCES = FormResources()
def TextField(title, value, xmin, ymin, xmax, ymax, page,
maxlen=1000000, font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627, multiline=0):
from reportlab.pdfbase.pdfdoc import PDFString, PDFName
Flags = 0
if multiline:
Flags = Flags | (1<<12) # bit 13 is at position 12 :)
fontname = FORMFONTNAMES[font]
return PDFPattern(TextFieldPattern,
value=PDFString(value), maxlen=maxlen, page=page,
title=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B, Flags=Flags)
TextFieldPattern = [
'<<'
' /DA'
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)'
' /DV ',
["value"], LINEEND,
' /F 4 /FT /Tx'
'/MK << /BC [ 0 0 0 ] >>'
' /MaxLen ',
["maxlen"], LINEEND,
' /P ',
["page"], LINEEND,
' /Rect '
' [', ["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"], ' ]'
'/Subtype /Widget'
' /T ',
["title"], LINEEND,
' /Type'
' /Annot'
' /V ',
["value"], LINEEND,
' /Ff ',
["Flags"],LINEEND,
'>>']
def SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627):
#print "ARGS", (title, value, options, xmin, ymin, xmax, ymax, page, font, fontsize, R, G, B)
from reportlab.pdfbase.pdfdoc import PDFString, PDFName, PDFArray
if value not in options:
raise ValueError, "value %s must be one of options %s" % (repr(value), repr(options))
fontname = FORMFONTNAMES[font]
optionstrings = map(PDFString, options)
optionarray = PDFArray(optionstrings)
return PDFPattern(SelectFieldPattern,
Options=optionarray,
Selected=PDFString(value), Page=page,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B)
SelectFieldPattern = [
'<< % a select list',LINEEND,
' /DA ',
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)',LINEEND,
#' (/Helv 12 Tf 0 g)',LINEEND,
' /DV ',
["Selected"],LINEEND,
' /F ',
' 4',LINEEND,
' /FT ',
' /Ch',LINEEND,
' /MK ',
' <<',
' /BC',
' [',
' 0',
' 0',
' 0',
' ]',
' /BG',
' [',
' 1',
' 1',
' 1',
' ]',
' >>',LINEEND,
' /Opt ',
["Options"],LINEEND,
' /P ',
["Page"],LINEEND,
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ',LINEEND,
'/Subtype',
' /Widget',LINEEND,
' /T ',
["Name"],LINEEND,
' /Type ',
' /Annot',
' /V ',
["Selected"],LINEEND,
'>>']
def ButtonField(title, value, xmin, ymin, page):
if value not in ("Yes", "Off"):
raise ValueError, "button value must be 'Yes' or 'Off': "+repr(value)
(dx, dy) = (16.77036, 14.90698)
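# These widget dimensions match the BBox declared for the button appearance
# streams in buttonStreamDictionary() below, so the annotation rectangle and
# its drawn appearance stay aligned.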
return PDFPattern(ButtonFieldPattern,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmin+dx, ymax=ymin+dy,
Hide=HIDE,
APDOff=APDOFF,
APDYes=APDYES,
APNYes=APNYES,
Value=PDFName(value),
Page=page)
ButtonFieldPattern = ['<< ',
'/AA',
' <<',
' /D ',
["Hide"], LINEEND,
#' %(imported.18.0)s',
' >> ',
'/AP ',
' <<',
' /D',
' <<',
' /Off ',
#' %(imported.40.0)s',
["APDOff"], LINEEND,
' /Yes ',
#' %(imported.39.0)s',
["APDYes"], LINEEND,
' >>', LINEEND,
' /N',
' << ',
' /Yes ',
#' %(imported.38.0)s',
["APNYes"], LINEEND,
' >>',
' >>', LINEEND,
' /AS ',
["Value"], LINEEND,
' /DA ',
PDFString('/ZaDb 0 Tf 0 g'), LINEEND,
'/DV ',
["Value"], LINEEND,
'/F ',
' 4 ',
'/FT ',
' /Btn ',
'/H ',
' /T ',
'/MK ',
' <<',
' /AC (\\376\\377)',
#PDFString('\376\377'),
' /CA ',
PDFString('4'),
' /RC ',
PDFString('\376\377'),
' >> ',LINEEND,
'/P ',
["Page"], LINEEND,
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ',LINEEND,
'/Subtype',
' /Widget ',
'/T ',
["Name"], LINEEND,
'/Type',
' /Annot ',
'/V ',
["Value"], LINEEND,
' >>']
HIDE = PDFPattern([
'<< '
'/S '
' /Hide '
'>>'])
def buttonStreamDictionary():
"everything except the length for the button appearance streams"
result = PDFDictionary()
result["SubType"] = "/Form"
result["BBox"] = "[0 0 16.77036 14.90698]"
font = PDFDictionary()
font["ZaDb"] = ZADB
resources = PDFDictionary()
resources["ProcSet"] = "[ /PDF /Text ]"
resources["Font"] = font
result["Resources"] = resources
return result
def ButtonStream(content):
dict = buttonStreamDictionary()
result = PDFStream(dict, content)
result.filters = []
return result
APDOFF = ButtonStream('0.749 g 0 0 16.7704 14.907 re f'+LINEEND)
APDYES = ButtonStream(
'0.749 g 0 0 16.7704 14.907 re f q 1 1 14.7704 12.907 re W '+
'n BT /ZaDb 11.3086 Tf 0 g 1 0 0 1 3.6017 3.3881 Tm (4) Tj ET'+LINEEND)
APNYES = ButtonStream(
'q 1 1 14.7704 12.907 re W n BT /ZaDb 11.3086 Tf 0 g 1 0 0 1 3.6017 3.3881 Tm (4) Tj ET Q'+LINEEND)
#==== script interpretation
if __name__=="__main__":
test1()
| bsd-3-clause | 2,114,313,584,419,419,000 | 5,127,336,236,190,010,000 | 24.946372 | 106 | 0.581129 | false |
a-parhom/edx-platform | common/djangoapps/student/tests/test_password_policy.py | 2 | 14639 | # -*- coding: utf-8 -*-
"""
This test file will verify proper password policy enforcement, which is an optional feature
"""
import json
from importlib import import_module
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from mock import patch
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.user_authn.views.deprecated import create_account
from util.password_policy_validators import create_validator_config
class TestPasswordPolicy(TestCase):
"""
Go through some password policy tests to make sure things are properly working
"""
def setUp(self):
super(TestPasswordPolicy, self).setUp()
self.url = reverse('create_account')
self.request_factory = RequestFactory()
self.url_params = {
'username': 'username',
'email': '[email protected]',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
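# Each test below fills in url_params['password'] and posts the registration
# form. create_validator_config (from util.password_policy_validators) is
# assumed here to build a single AUTH_PASSWORD_VALIDATORS entry (validator
# dotted path plus its OPTIONS), so @override_settings activates exactly one
# policy per test case.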
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6})
])
def test_password_length_too_short(self):
self.url_params['password'] = 'aaa'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password is too short. It must contain at least 6 characters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6})
])
def test_password_length_long_enough(self):
self.url_params['password'] = 'ThisIsALongerPassword'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MaximumLengthValidator', {'max_length': 12})
])
def test_password_length_too_long(self):
self.url_params['password'] = 'ThisPasswordIsWayTooLong'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password is too long. It must contain no more than 12 characters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3})
])
def test_password_not_enough_uppercase(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 uppercase letters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3})
])
def test_password_enough_uppercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.LowercaseValidator', {'min_lower': 3})
])
def test_password_not_enough_lowercase(self):
self.url_params['password'] = 'THISSHOULDFAIL'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 lowercase letters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.LowercaseValidator', {'min_lower': 3})
])
def test_password_enough_lowercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3})
])
def test_not_enough_punctuations(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 punctuation marks.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3})
])
def test_enough_punctuations(self):
self.url_params['password'] = 'Th!sSh.uldPa$*'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3})
])
def test_not_enough_numeric_characters(self):
# The unicode ២ is the number 2 in Khmer and the ٧ is the Arabic-Indic number 7
self.url_params['password'] = u'thisShouldFail២٧'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 numbers.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3})
])
def test_enough_numeric_characters(self):
# The unicode ២ is the number 2 in Khmer
self.url_params['password'] = u'thisShouldPass២33'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 3})
])
def test_not_enough_alphabetic_characters(self):
self.url_params['password'] = '123456ab'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 letters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 3})
])
def test_enough_alphabetic_characters(self):
self.url_params['password'] = u'𝒯𝓗Ï𝓼𝒫å𝓼𝓼𝔼𝓼'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 3}),
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3}),
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3}),
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3}),
])
def test_multiple_errors_fail(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
errstring = (
"This password must contain at least 3 uppercase letters. "
"This password must contain at least 3 numbers. "
"This password must contain at least 3 punctuation marks."
)
self.assertEqual(obj['value'], errstring)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 3}),
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3}),
create_validator_config('util.password_policy_validators.LowercaseValidator', {'min_lower': 3}),
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3}),
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3}),
])
def test_multiple_errors_pass(self):
self.url_params['password'] = u'tH1s Sh0u!d P3#$!'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.CommonPasswordValidator')
])
def test_common_password_fail(self):
self.url_params['password'] = 'password'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password is too common.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.CommonPasswordValidator')
])
def test_common_password_pass(self):
self.url_params['password'] = 'this_is_ok'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6}),
create_validator_config('util.password_policy_validators.MaximumLengthValidator', {'max_length': 75}),
])
def test_with_unicode(self):
self.url_params['password'] = u'四節比分和七年前'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6})
], SESSION_ENGINE='django.contrib.sessions.backends.cache')
def test_ext_auth_password_length_too_short(self):
"""
Tests that even if password policy is enforced, ext_auth registrations aren't subject to it
"""
self.url_params['password'] = u'aaa' # shouldn't pass validation
request = self.request_factory.post(self.url, self.url_params)
request.site = SiteFactory.create()
# now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
internal_password=self.url_params['password'],
external_domain='shib:https://idp.stanford.edu/')
request.session['ExternalAuthMap'] = extauth
request.user = AnonymousUser()
with patch('edxmako.request_context.get_current_request', return_value=request):
response = create_account(request)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
class TestUsernamePasswordNonmatch(TestCase):
"""
Test that registration username and password fields differ
"""
def setUp(self):
super(TestUsernamePasswordNonmatch, self).setUp()
self.url = reverse('create_account')
self.url_params = {
'username': 'username',
'email': '[email protected]',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.UserAttributeSimilarityValidator')
])
def test_with_username_password_match(self):
self.url_params['username'] = "foobar"
self.url_params['password'] = "foobar"
response = self.client.post(self.url, self.url_params)
self.assertEquals(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"The password is too similar to the username.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.UserAttributeSimilarityValidator')
])
def test_with_username_password_nonmatch(self):
self.url_params['username'] = "foobar"
self.url_params['password'] = "nonmatch"
response = self.client.post(self.url, self.url_params)
self.assertEquals(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
| agpl-3.0 | 88,533,273,914,379,650 | -7,359,241,423,302,365,000 | 44.301242 | 112 | 0.664016 | false |
alaski/nova | nova/scheduler/filters/exact_disk_filter.py | 18 | 1846 | # Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class ExactDiskFilter(filters.BaseHostFilter):
"""Exact Disk Filter."""
def host_passes(self, host_state, spec_obj):
"""Return True if host has the exact amount of disk available."""
requested_disk = (1024 * (spec_obj.root_gb +
spec_obj.ephemeral_gb) +
spec_obj.swap)
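# root_gb and ephemeral_gb are gigabytes (hence the * 1024) while swap is
# already megabytes, so requested_disk is a total in MB; e.g. a flavor with
# root_gb=10, ephemeral_gb=0, swap=0 must find exactly 10240 MB free.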
if requested_disk != host_state.free_disk_mb:
LOG.debug("%(host_state)s does not have exactly "
"%(requested_disk)s MB usable disk, it "
"has %(usable_disk_mb)s.",
{'host_state': host_state,
'requested_disk': requested_disk,
'usable_disk_mb': host_state.free_disk_mb})
return False
# NOTE(mgoddard): Setting the limit ensures that it is enforced in
# compute. This ensures that if multiple instances are scheduled to a
# single host, then all after the first will fail in the claim.
host_state.limits['disk_gb'] = host_state.total_usable_disk_gb
return True
| apache-2.0 | 2,222,348,295,965,614,600 | 1,207,456,646,038,196,000 | 40.022222 | 78 | 0.626761 | false |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/tpu/models/official/resnet/resnet_main.py | 5 | 27064 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
from official.resnet import imagenet_input
from official.resnet import lars_util
from official.resnet import resnet_model
from tensorflow.contrib import summary
from tensorflow.contrib.tpu.python.tpu import async_checkpoint
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.estimator import estimator
FLAGS = flags.FLAGS
FAKE_DATA_DIR = 'gs://cloud-tpu-test-datasets/fake_imagenet'
flags.DEFINE_bool(
'use_tpu', default=True,
help=('Use TPU to execute the model for training and evaluation. If'
' --use_tpu=false, will use whatever devices are available to'
' TensorFlow by default (e.g. CPU and GPU)'))
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
# Model specific flags
flags.DEFINE_string(
'data_dir', default=FAKE_DATA_DIR,
help=('The directory where the ImageNet input data is stored. Please see'
' the README.md for the expected data format.'))
flags.DEFINE_string(
'model_dir', default=None,
help=('The directory where the model and training/evaluation summaries are'
' stored.'))
flags.DEFINE_integer(
'resnet_depth', default=50,
help=('Depth of ResNet model to use. Must be one of {18, 34, 50, 101, 152,'
' 200}. ResNet-18 and 34 use the pre-activation residual blocks'
' without bottleneck layers. The other models use pre-activation'
' bottleneck layers. Deeper models require more training time and'
' more memory and may require reducing --train_batch_size to prevent'
' running out of memory.'))
flags.DEFINE_string(
'mode', default='train_and_eval',
help='One of {"train_and_eval", "train", "eval"}.')
flags.DEFINE_integer(
'train_steps', default=112590,
help=('The number of steps to use for training. Default is 112590 steps'
' which is approximately 90 epochs at batch size 1024. This flag'
' should be adjusted according to the --train_batch_size flag.'))
flags.DEFINE_integer(
'train_batch_size', default=1024, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=1024, help='Batch size for evaluation.')
flags.DEFINE_integer(
'num_train_images', default=1281167, help='Size of training data set.')
flags.DEFINE_integer(
'num_eval_images', default=50000, help='Size of evaluation data set.')
flags.DEFINE_integer(
'num_label_classes', default=1000, help='Number of classes, at least 2')
flags.DEFINE_integer(
'steps_per_eval', default=1251,
help=('Controls how often evaluation is performed. Since evaluation is'
' fairly expensive, it is advised to evaluate as infrequently as'
' possible (i.e. up to --train_steps, which evaluates the model only'
' after finishing the entire training regime).'))
flags.DEFINE_integer(
'eval_timeout',
default=None,
help='Maximum seconds between checkpoints before evaluation terminates.')
flags.DEFINE_bool(
'skip_host_call', default=False,
help=('Skip the host_call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --skip_host_call=false, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the TPU-side computation.'))
flags.DEFINE_integer(
'iterations_per_loop', default=1251,
help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'
' If the number of iterations in the loop would exceed the number of'
' train steps, the loop will exit before reaching'
' --iterations_per_loop. The larger this value is, the higher the'
' utilization on the TPU.'))
flags.DEFINE_integer(
'num_parallel_calls', default=64,
help=('Number of parallel threads in CPU for the input pipeline'))
flags.DEFINE_integer(
'num_cores', default=8,
help=('Number of TPU cores. For a single TPU device, this is 8 because each'
' TPU has 4 chips each with 2 cores.'))
flags.DEFINE_string(
'bigtable_project', None,
'The Cloud Bigtable project. If None, --gcp_project will be used.')
flags.DEFINE_string(
'bigtable_instance', None,
'The Cloud Bigtable instance to load data from.')
flags.DEFINE_string(
'bigtable_table', 'imagenet',
'The Cloud Bigtable table to load data from.')
flags.DEFINE_string(
'bigtable_train_prefix', 'train_',
'The prefix identifying training rows.')
flags.DEFINE_string(
'bigtable_eval_prefix', 'validation_',
'The prefix identifying evaluation rows.')
flags.DEFINE_string(
'bigtable_column_family', 'tfexample',
'The column family storing TFExamples.')
flags.DEFINE_string(
'bigtable_column_qualifier', 'example',
'The column name storing TFExamples.')
flags.DEFINE_string(
'data_format', default='channels_last',
help=('A flag to override the data format used in the model. The value'
' is either channels_first or channels_last. To run the network on'
' CPU or TPU, channels_last should be used. For GPU, channels_first'
' will improve performance.'))
# TODO(chrisying): remove this flag once --transpose_tpu_infeed flag is enabled
# by default for TPU
flags.DEFINE_bool(
'transpose_input', default=True,
help='Use TPU double transpose optimization')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_bool(
'export_to_tpu', default=False,
help=('Whether to export additional metagraph with "serve, tpu" tags'
' in addition to "serve" only metagraph.'))
flags.DEFINE_string(
'precision', default='bfloat16',
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_float(
'base_learning_rate', default=0.1,
help=('Base learning rate when train batch size is 256.'))
flags.DEFINE_float(
'momentum', default=0.9,
help=('Momentum parameter used in the MomentumOptimizer.'))
flags.DEFINE_float(
'weight_decay', default=1e-4,
help=('Weight decay coefficient for l2 regularization.'))
flags.DEFINE_float(
'label_smoothing', default=0.0,
help=('Label smoothing parameter used in the softmax_cross_entropy'))
flags.DEFINE_integer('log_step_count_steps', 64, 'The number of steps at '
'which the global step information is logged.')
flags.DEFINE_bool('enable_lars',
default=False,
help=('Enable LARS optimizer for large batch training.'))
flags.DEFINE_float('poly_rate', default=0.0,
help=('Set LARS/Poly learning rate.'))
flags.DEFINE_bool(
'use_cache', default=True, help=('Enable cache for training input.'))
flags.DEFINE_bool(
'use_async_checkpointing', default=False, help=('Enable async checkpoint'))
# Learning rate schedule
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
# The input tensor is in the range of [0, 255], we need to scale them to the
# range of [0, 1]
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def learning_rate_schedule(current_epoch):
"""Handles linear scaling rule, gradual warmup, and LR decay.
The learning rate starts at 0, then it increases linearly per step.
After 5 epochs we reach the base learning rate (scaled to account
for batch size).
After 30, 60 and 80 epochs the learning rate is divided by 10.
After 90 epochs training stops and the LR is set to 0. This ensures
that we train for exactly 90 epochs for reproducibility.
Args:
current_epoch: `Tensor` for current epoch.
Returns:
A scaled `Tensor` for current learning rate.
"""
scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)
decay_rate = (scaled_lr * LR_SCHEDULE[0][0] *
current_epoch / LR_SCHEDULE[0][1])
for mult, start_epoch in LR_SCHEDULE:
decay_rate = tf.where(current_epoch < start_epoch,
decay_rate, scaled_lr * mult)
return decay_rate
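# Worked example with the default flags: train_batch_size=1024 gives a scaled
# base rate of 0.1 * 1024 / 256 = 0.4; during warmup at epoch 2.5 the schedule
# returns 0.4 * 2.5 / 5 = 0.2, and once past epoch 30 it decays to
# 0.4 * 0.1 = 0.04.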
def resnet_model_fn(features, labels, mode, params):
"""The model_fn for ResNet to be used with TPUEstimator.
Args:
features: `Tensor` of batched images.
labels: `Tensor` of labels for the data samples
mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}`
params: `dict` of parameters passed to the model from the TPUEstimator,
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `TPUEstimatorSpec` for the model
"""
if isinstance(features, dict):
features = features['feature']
# In most cases, the default data format NCHW instead of NHWC should be
# used for a significant performance boost on GPU/TPU. NHWC should be used
# only if the network needs to be run on CPU since the pooling operations
# are only supported on NHWC.
if FLAGS.data_format == 'channels_first':
assert not FLAGS.transpose_input # channels_first only for GPU
features = tf.transpose(features, [0, 3, 1, 2])
if FLAGS.transpose_input and mode != tf.estimator.ModeKeys.PREDICT:
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
# Normalize the image to zero mean and unit variance.
features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
network = resnet_model.resnet_v1(
resnet_depth=FLAGS.resnet_depth,
num_classes=FLAGS.num_label_classes,
data_format=FLAGS.data_format)
return network(
inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
if FLAGS.precision == 'bfloat16':
with tf.contrib.tpu.bfloat16_scope():
logits = build_network()
logits = tf.cast(logits, tf.float32)
elif FLAGS.precision == 'float32':
logits = build_network()
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
# If necessary, in the model_fn, use params['batch_size'] instead the batch
# size flags (--train_batch_size or --eval_batch_size).
batch_size = params['batch_size'] # pylint: disable=unused-variable
# Calculate loss, which includes softmax cross entropy and L2 regularization.
one_hot_labels = tf.one_hot(labels, FLAGS.num_label_classes)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
label_smoothing=FLAGS.label_smoothing)
# Add weight decay to the loss for non-batch-normalization variables.
loss = cross_entropy + FLAGS.weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
host_call = None
if mode == tf.estimator.ModeKeys.TRAIN:
# Compute the current epoch and associated learning rate from global_step.
global_step = tf.train.get_global_step()
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
current_epoch = (tf.cast(global_step, tf.float32) /
steps_per_epoch)
# LARS is a large batch optimizer. LARS enables higher accuracy at batch 16K
# and larger batch sizes.
if FLAGS.train_batch_size >= 16384 and FLAGS.enable_lars:
learning_rate = 0.0
optimizer = lars_util.init_lars_optimizer(current_epoch)
else:
learning_rate = learning_rate_schedule(current_epoch)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=FLAGS.momentum,
use_nesterov=True)
if FLAGS.use_tpu:
# When using TPU, wrap the optimizer with CrossShardOptimizer which
# handles synchronization details between different TPU cores. To the
# user, this should look like regular synchronous training.
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
# Batch normalization requires UPDATE_OPS to be added as a dependency to
# the train operation.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
if not FLAGS.skip_host_call:
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
gs: `Tensor with shape `[batch]` for the global_step
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
# Host call fns are executed FLAGS.iterations_per_loop times after one
# TPU loop is finished, setting max_queue value to the same as number of
# iterations will make the summary writer only flush the data to storage
# once per loop.
with summary.create_file_writer(
FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with summary.always_record_summaries():
summary.scalar('loss', loss[0], step=gs)
summary.scalar('learning_rate', lr[0], step=gs)
summary.scalar('current_epoch', ce[0], step=gs)
return summary.all_summary_ops()
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'top_1_accuracy': top_1_accuracy,
'top_5_accuracy': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics)
def _verify_non_empty_string(value, field_name):
"""Ensures that a given proposed field value is a non-empty string.
Args:
value: proposed value for the field.
field_name: string name of the field, e.g. `project`.
Returns:
The given value, provided that it passed the checks.
Raises:
ValueError: the value is not a string, or is a blank string.
"""
if not isinstance(value, str):
raise ValueError(
'Bigtable parameter "%s" must be a string.' % field_name)
if not value:
raise ValueError(
'Bigtable parameter "%s" must be non-empty.' % field_name)
return value
def _select_tables_from_flags():
"""Construct training and evaluation Bigtable selections from flags.
Returns:
[training_selection, evaluation_selection]
"""
project = _verify_non_empty_string(
FLAGS.bigtable_project or FLAGS.gcp_project,
'project')
instance = _verify_non_empty_string(FLAGS.bigtable_instance, 'instance')
table = _verify_non_empty_string(FLAGS.bigtable_table, 'table')
train_prefix = _verify_non_empty_string(FLAGS.bigtable_train_prefix,
'train_prefix')
eval_prefix = _verify_non_empty_string(FLAGS.bigtable_eval_prefix,
'eval_prefix')
column_family = _verify_non_empty_string(FLAGS.bigtable_column_family,
'column_family')
column_qualifier = _verify_non_empty_string(FLAGS.bigtable_column_qualifier,
'column_qualifier')
return [
imagenet_input.BigtableSelection(
project=project,
instance=instance,
table=table,
prefix=p,
column_family=column_family,
column_qualifier=column_qualifier)
for p in (train_prefix, eval_prefix)
]
def main(unused_argv):
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu if (FLAGS.tpu or FLAGS.use_tpu) else '',
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
if FLAGS.use_async_checkpointing:
save_checkpoints_steps = None
else:
save_checkpoints_steps = max(100, FLAGS.iterations_per_loop)
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
log_step_count_steps=FLAGS.log_step_count_steps,
session_config=tf.ConfigProto(
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True))),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig
.PER_HOST_V2)) # pylint: disable=line-too-long
resnet_classifier = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu)
assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
'Invalid value for --precision flag; must be bfloat16 or float32.')
tf.logging.info('Precision: %s', FLAGS.precision)
use_bfloat16 = FLAGS.precision == 'bfloat16'
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
if FLAGS.bigtable_instance:
tf.logging.info('Using Bigtable dataset, table %s', FLAGS.bigtable_table)
select_train, select_eval = _select_tables_from_flags()
imagenet_train, imagenet_eval = [imagenet_input.ImageNetBigtableInput(
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=FLAGS.transpose_input,
selection=selection) for (is_training, selection) in
[(True, select_train),
(False, select_eval)]]
else:
if FLAGS.data_dir == FAKE_DATA_DIR:
tf.logging.info('Using fake dataset.')
else:
tf.logging.info('Using dataset: %s', FLAGS.data_dir)
imagenet_train, imagenet_eval = [
imagenet_input.ImageNetInput(
is_training=is_training,
data_dir=FLAGS.data_dir,
transpose_input=FLAGS.transpose_input,
cache=FLAGS.use_cache and is_training,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=use_bfloat16) for is_training in [True, False]
]
steps_per_epoch = FLAGS.num_train_images // FLAGS.train_batch_size
eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size
if FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Eval results: %s. Elapsed seconds: %d',
eval_results, elapsed_time)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
'Evaluation finished after training step %d', current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint', ckpt)
else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
steps_per_epoch = FLAGS.num_train_images // FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.',
FLAGS.train_steps,
FLAGS.train_steps / steps_per_epoch,
current_step)
start_timestamp = time.time() # This time will include compilation time
if FLAGS.mode == 'train':
hooks = []
if FLAGS.use_async_checkpointing:
hooks.append(
async_checkpoint.AsyncCheckpointSaverHook(
checkpoint_dir=FLAGS.model_dir,
save_steps=max(100, FLAGS.iterations_per_loop)))
resnet_classifier.train(
input_fn=imagenet_train.input_fn,
max_steps=FLAGS.train_steps,
hooks=hooks)
else:
assert FLAGS.mode == 'train_and_eval'
while current_step < FLAGS.train_steps:
# Train for up to steps_per_eval number of steps.
# At the end of training, a checkpoint will be written to --model_dir.
next_checkpoint = min(current_step + FLAGS.steps_per_eval,
FLAGS.train_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=next_checkpoint)
current_step = next_checkpoint
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
next_checkpoint, int(time.time() - start_timestamp))
# Evaluate the model on the most recent model in --model_dir.
# Since evaluation happens in batches of --eval_batch_size, some images
# may be excluded modulo the batch size. As long as the batch size is
# consistent, the evaluated images are also consistent.
tf.logging.info('Starting to evaluate.')
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=FLAGS.num_eval_images // FLAGS.eval_batch_size)
tf.logging.info('Eval results at step %d: %s',
next_checkpoint, eval_results)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
FLAGS.train_steps, elapsed_time)
if FLAGS.export_dir is not None:
# The guide to serve a exported TensorFlow model is at:
# https://www.tensorflow.org/serving/serving_basic
tf.logging.info('Starting to export model.')
resnet_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=imagenet_input.image_serving_input_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| apache-2.0 | 3,595,334,027,505,108,500 | 5,119,847,382,111,558,000 | 38.917404 | 133 | 0.663686 | false |
eaudeweb/xhtml2pdf | setup_version.py | 61 | 1771 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Updates the version infos
"""
import time
import re
import cgi
VERSION = open("VERSION.txt", "r").read().strip()
BUILD = time.strftime("%Y-%m-%d")
FILES = [
"setup.py",
"setup_exe.py",
# "setup_egg.py",
"sx/pisa3/pisa_version.py",
"doc/pisa-en.html",
]
try:
HELP = cgi.escape(open("HELP.txt", "r").read(), 1)
except:
HELP = ""
HELP = "<!--HELP--><pre>" + HELP + "</pre><!--HELP-->"
rxversion = re.compile("VERSION{.*?}VERSION", re.MULTILINE|re.IGNORECASE|re.DOTALL)
rxbuild = re.compile("BUILD{.*?}BUILD", re.MULTILINE|re.IGNORECASE|re.DOTALL)
rxversionhtml = re.compile("\<\!--VERSION--\>.*?\<\!--VERSION--\>", re.MULTILINE|re.IGNORECASE|re.DOTALL)
rxhelphtml = re.compile("\<\!--HELP--\>.*?\<\!--HELP--\>", re.MULTILINE|re.IGNORECASE|re.DOTALL)
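# Illustrative example (the version number is made up): a file containing
# VERSION{3.0.0}VERSION or <!--VERSION-->3.0.0<!--VERSION--> has the payload
# between the markers replaced with the current VERSION string by the
# substitutions below; BUILD{...}BUILD and <!--HELP-->...<!--HELP--> are
# handled the same way.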
for fname in FILES:
print "Update", fname, "..."
data = open(fname, "rb").read()
data = rxversion.sub("VERSION{" + VERSION + "}VERSION", data)
data = rxversionhtml.sub("<!--VERSION-->" + VERSION + "<!--VERSION-->", data)
data = rxbuild.sub("BUILD{" + BUILD + "}BUILD", data)
data = rxhelphtml.sub(HELP, data)
open(fname, "wb").write(data)
| apache-2.0 | -5,805,185,087,789,091,000 | -8,937,504,320,173,323,000 | 32.415094 | 105 | 0.649351 | false |
gmalmquist/pants | tests/python/pants_test/backend/codegen/tasks/test_ragel_gen.py | 12 | 2331 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.codegen.targets.java_ragel_library import JavaRagelLibrary
from pants.backend.codegen.tasks.ragel_gen import RagelGen, calculate_genfile
from pants.util.contextutil import temporary_file
from pants.util.dirutil import safe_mkdtemp
from pants_test.tasks.task_test_base import TaskTestBase
ragel_file_contents = dedent("""
package com.example.atoi;
%%{
machine parser;
action minus {
negative = true;
}
action digit {
val *= 10;
val += fc - '0';
}
main := ( '-'@minus )? ( [0-9] @digit ) + '\0';
}%%
public class Parser {
%% write data;
public static int parse(CharSequence input) {
StringBuilder builder = new StringBuilder(input);
builder.append('\0');
char[] data = builder.toString().toCharArray();
int p = 0;
int pe = data.length;
int eof = pe;
int cs;
boolean negative = false;
int val = 0;
%% write init;
%% write exec;
if (negative)
return -val;
else
return val;
}
}
""")
class RagelGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return RagelGen
def test_ragel_gen(self):
self.create_file(relpath='test_ragel_gen/atoi.rl', contents=ragel_file_contents)
target = self.make_target(spec='test_ragel_gen:atoi',
target_type=JavaRagelLibrary,
sources=['atoi.rl'])
task = self.create_task(self.context(target_roots=[target]))
target_workdir = safe_mkdtemp(dir=self.test_workdir)
task.execute_codegen(target, target_workdir)
generated_files = []
for root, _, files in os.walk(target_workdir):
generated_files.extend(os.path.relpath(os.path.join(root, f), target_workdir) for f in files)
self.assertEqual(['com/example/atoi/Parser.java'], generated_files)
def test_smoke(self):
with temporary_file() as fp:
fp.write(ragel_file_contents)
fp.flush()
self.assertEquals(calculate_genfile(fp.name), 'com/example/atoi/Parser.java')
| apache-2.0 | -7,795,438,957,769,122,000 | 2,716,586,105,773,098,000 | 26.423529 | 99 | 0.662806 | false |
yinchunlong/abelkhan-1 | ext/c++/thirdpart/c++/boost/tools/build/src/build/feature.py | 11 | 33759 | # Status: ported, except for unit tests.
# Base revision: 64488
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2002, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
from b2.util import utility, bjam_signature, is_iterable_typed
import b2.util.set
from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, to_seq
from b2.exceptions import *
# Assumed import: Feature.set_default below calls get_manager(), which is
# expected to live in b2.manager as elsewhere in Boost.Build.
from b2.manager import get_manager
__re_split_subfeatures = re.compile ('<(.*):(.*)>')
__re_no_hyphen = re.compile ('^([^:]+)$')
__re_slash_or_backslash = re.compile (r'[\\/]')
class Feature(object):
# Map from string attribute names to integers bit flags.
# This will be initialized after declaration of the class.
_attribute_name_to_integer = {}
def __init__(self, name, values, attributes):
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
assert is_iterable_typed(attributes, basestring)
self._name = name
self._values = values
self._default = None
self._attributes = 0
for a in attributes:
self._attributes = self._attributes | Feature._attribute_name_to_integer[a]
self._attributes_string_list = attributes
self._subfeatures = []
self._parent = None
def name(self):
return self._name
def values(self):
return self._values
def add_values(self, values):
assert is_iterable_typed(values, basestring)
self._values.extend(values)
def attributes(self):
return self._attributes
def set_default(self, value):
assert isinstance(value, basestring)
for attr in ('free', 'optional'):
if getattr(self, attr)():
get_manager().errors()('"{}" feature "<{}>" cannot have a default value.'
.format(attr, self._name))
self._default = value
def default(self):
return self._default
# FIXME: remove when we fully move to using classes for features/properties
def attributes_string_list(self):
return self._attributes_string_list
def subfeatures(self):
return self._subfeatures
def add_subfeature(self, name):
assert isinstance(name, Feature)
self._subfeatures.append(name)
def parent(self):
"""For subfeatures, return pair of (parent_feature, value).
Value may be None if this subfeature is not specific to any
value of the parent feature.
"""
return self._parent
def set_parent(self, feature, value):
assert isinstance(feature, Feature)
assert isinstance(value, basestring)
self._parent = (feature, value)
def __str__(self):
return self._name
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __all_attributes, __all_features, __implicit_features, __composite_properties
global __features_with_attributes, __subfeature_from_value, __all_top_features, __free_features
global __all_subfeatures
# The list with all attribute names.
__all_attributes = [ 'implicit',
'composite',
'optional',
'symmetric',
'free',
'incidental',
'path',
'dependency',
'propagated',
'link-incompatible',
'subfeature',
'order-sensitive'
]
i = 1
for a in __all_attributes:
setattr(Feature, a.upper(), i)
Feature._attribute_name_to_integer[a] = i
def probe(self, flag=i):
return getattr(self, "_attributes") & flag
setattr(Feature, a.replace("-", "_"), probe)
i = i << 1
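    # Note (added for clarity): the loop above synthesises a bit-flag class
    # constant for every attribute name (e.g. Feature.IMPLICIT, Feature.FREE)
    # together with a predicate method (e.g. f.implicit(), f.free(),
    # f.order_sensitive()), so callers can test attributes without masking
    # Feature._attributes by hand.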
# A map containing all features. The key is the feature name.
# The value is an instance of Feature class.
__all_features = {}
# All non-subfeatures.
__all_top_features = []
    # Maps values to the corresponding implicit feature
__implicit_features = {}
# A map containing all composite properties. The key is a Property instance,
# and the value is a list of Property instances
__composite_properties = {}
__features_with_attributes = {}
for attribute in __all_attributes:
__features_with_attributes [attribute] = []
# Maps a value to the corresponding subfeature name.
__subfeature_from_value = {}
# All free features
__free_features = []
__all_subfeatures = []
reset ()
def enumerate ():
""" Returns an iterator to the features map.
"""
return __all_features.iteritems ()
def get(name):
"""Return the Feature instance for the specified name.
Throws if no feature by such name exists
"""
assert isinstance(name, basestring)
return __all_features[name]
# FIXME: prepare-test/finish-test?
@bjam_signature((["name"], ["values", "*"], ["attributes", "*"]))
def feature (name, values, attributes = []):
""" Declares a new feature with the given name, values, and attributes.
name: the feature name
values: a sequence of the allowable values - may be extended later with feature.extend
attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...)
"""
__validate_feature_attributes (name, attributes)
feature = Feature(name, [], attributes)
__all_features[name] = feature
# Temporary measure while we have not fully moved from 'gristed strings'
__all_features["<" + name + ">"] = feature
for attribute in attributes:
__features_with_attributes [attribute].append (name)
name = add_grist(name)
if 'subfeature' in attributes:
__all_subfeatures.append(name)
else:
__all_top_features.append(feature)
extend (name, values)
    # FIXME: why this is needed.
if 'free' in attributes:
__free_features.append (name)
return feature
@bjam_signature((["feature"], ["value"]))
def set_default (feature, value):
""" Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
"""
f = __all_features[feature]
attributes = f.attributes()
bad_attribute = None
if attributes & Feature.FREE:
bad_attribute = "free"
elif attributes & Feature.OPTIONAL:
bad_attribute = "optional"
if bad_attribute:
        raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name()))
if not value in f.values():
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values())
f.set_default(value)
def defaults(features):
""" Returns the default property values for the given features.
"""
assert is_iterable_typed(features, Feature)
# FIXME: should merge feature and property modules.
from . import property
result = []
for f in features:
if not f.free() and not f.optional() and f.default():
result.append(property.Property(f, f.default()))
return result
def valid (names):
""" Returns true iff all elements of names are valid features.
"""
if isinstance(names, str):
names = [names]
assert is_iterable_typed(names, basestring)
return all(name in __all_features for name in names)
def attributes (feature):
""" Returns the attributes of the given feature.
"""
assert isinstance(feature, basestring)
return __all_features[feature].attributes_string_list()
def values (feature):
""" Return the values of the given feature.
"""
assert isinstance(feature, basestring)
validate_feature (feature)
return __all_features[feature].values()
def is_implicit_value (value_string):
""" Returns true iff 'value_string' is a value_string
of an implicit feature.
"""
assert isinstance(value_string, basestring)
if __implicit_features.has_key(value_string):
return __implicit_features[value_string]
v = value_string.split('-')
if not __implicit_features.has_key(v[0]):
return False
feature = __implicit_features[v[0]]
for subvalue in (v[1:]):
if not __find_implied_subfeature(feature, subvalue, v[0]):
return False
return True
def implied_feature (implicit_value):
""" Returns the implicit feature associated with the given implicit value.
"""
assert isinstance(implicit_value, basestring)
components = implicit_value.split('-')
if not __implicit_features.has_key(components[0]):
raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
return __implicit_features[components[0]]
def __find_implied_subfeature (feature, subvalue, value_string):
assert isinstance(feature, Feature)
assert isinstance(subvalue, basestring)
assert isinstance(value_string, basestring)
try:
return __subfeature_from_value[feature][value_string][subvalue]
except KeyError:
return None
# Given a feature and a value of one of its subfeatures, find the name
# of the subfeature. If value-string is supplied, looks for implied
# subfeatures that are specific to that value of feature
# feature # The main feature name
# subvalue # The value of one of its subfeatures
# value-string # The value of the main feature
def implied_subfeature (feature, subvalue, value_string):
assert isinstance(feature, Feature)
assert isinstance(subvalue, basestring)
assert isinstance(value_string, basestring)
result = __find_implied_subfeature (feature, subvalue, value_string)
if not result:
raise InvalidValue ("'%s' is not a known subfeature value of '%s%s'" % (subvalue, feature, value_string))
return result
def validate_feature (name):
""" Checks if all name is a valid feature. Otherwise, raises an exception.
"""
assert isinstance(name, basestring)
if not __all_features.has_key(name):
raise InvalidFeature ("'%s' is not a valid feature name" % name)
else:
return __all_features[name]
# Uses Property
def __expand_subfeatures_aux (property_, dont_validate = False):
""" Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done.
"""
from . import property # no __debug__ since Property is used elsewhere
assert isinstance(property_, property.Property)
assert isinstance(dont_validate, int) # matches bools
f = property_.feature()
v = property_.value()
if not dont_validate:
validate_value_string(f, v)
components = v.split ("-")
v = components[0]
result = [property.Property(f, components[0])]
subvalues = components[1:]
while len(subvalues) > 0:
subvalue = subvalues [0] # pop the head off of subvalues
subvalues = subvalues [1:]
subfeature = __find_implied_subfeature (f, subvalue, v)
# If no subfeature was found, reconstitute the value string and use that
if not subfeature:
return [property.Property(f, '-'.join(components))]
result.append(property.Property(subfeature, subvalue))
return result
def expand_subfeatures(properties, dont_validate = False):
"""
Make all elements of properties corresponding to implicit features
explicit, and express all subfeature values as separate properties
in their own right. For example, the property
gcc-2.95.2-linux-x86
might expand to
<toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
properties: A sequence with elements of the form
<feature>value-string or just value-string in the
case of implicit features.
: dont_validate: If True, no validation of value string will be done.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(dont_validate, int) # matches bools
result = []
for p in properties:
# Don't expand subfeatures in subfeatures
if p.feature().subfeature():
result.append (p)
else:
result.extend(__expand_subfeatures_aux (p, dont_validate))
return result
# rule extend was defined as below:
# Can be called three ways:
#
# 1. extend feature : values *
# 2. extend <feature> subfeature : values *
# 3. extend <feature>value-string subfeature : values *
#
# * Form 1 adds the given values to the given feature
# * Forms 2 and 3 add subfeature values to the given feature
# * Form 3 adds the subfeature values as specific to the given
# property value-string.
#
#rule extend ( feature-or-property subfeature ? : values * )
#
# Now, the specific rule must be called, depending on the desired operation:
# extend_feature
# extend_subfeature
def extend (name, values):
""" Adds the given values to the given feature.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
name = add_grist (name)
__validate_feature (name)
feature = __all_features [name]
if feature.implicit():
for v in values:
if __implicit_features.has_key(v):
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if values and not feature.values() and not(feature.free() or feature.optional()):
# This is the first value specified for this feature,
# take it as default value
feature.set_default(values[0])
feature.add_values(values)
def validate_value_string (f, value_string):
""" Checks that value-string is a valid value-string for the given feature.
"""
assert isinstance(f, Feature)
assert isinstance(value_string, basestring)
if f.free() or value_string in f.values():
return
values = [value_string]
if f.subfeatures():
if not value_string in f.values() and \
not value_string in f.subfeatures():
values = value_string.split('-')
# An empty value is allowed for optional features
if not values[0] in f.values() and \
(values[0] or not f.optional()):
raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name(), f.values()))
for v in values [1:]:
# this will validate any subfeature values in value-string
implied_subfeature(f, v, values[0])
""" Extends the given subfeature with the subvalues. If the optional
value-string is provided, the subvalues are only valid for the given
value of the feature. Thus, you could say that
<target-platform>mingw is specifc to <toolset>gcc-2.95.2 as follows:
extend-subfeature toolset gcc-2.95.2 : target-platform : mingw ;
feature: The feature whose subfeature is being extended.
value-string: If supplied, specifies a specific value of the
main feature for which the new subfeature values
are valid.
subfeature: The name of the subfeature.
subvalues: The additional values of the subfeature being defined.
"""
def extend_subfeature (feature_name, value_string, subfeature_name, subvalues):
assert isinstance(feature_name, basestring)
assert isinstance(value_string, basestring)
assert isinstance(subfeature_name, basestring)
assert is_iterable_typed(subvalues, basestring)
feature = validate_feature(feature_name)
if value_string:
validate_value_string(feature, value_string)
subfeature_name = feature_name + '-' + __get_subfeature_name (subfeature_name, value_string)
    extend(subfeature_name, subvalues)
subfeature = __all_features[subfeature_name]
if value_string == None: value_string = ''
if not __subfeature_from_value.has_key(feature):
__subfeature_from_value [feature] = {}
if not __subfeature_from_value[feature].has_key(value_string):
__subfeature_from_value [feature][value_string] = {}
for subvalue in subvalues:
__subfeature_from_value [feature][value_string][subvalue] = subfeature
@bjam_signature((["feature_name", "value_string", "?"], ["subfeature"],
["subvalues", "*"], ["attributes", "*"]))
def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
""" Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature.
"""
parent_feature = validate_feature (feature_name)
# Add grist to the subfeature name if a value-string was supplied
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures():
message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
message += " specific to '%s'" % value_string
raise BaseException (message)
# First declare the subfeature as a feature in its own right
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
# Now make sure the subfeature values are known.
extend_subfeature (feature_name, value_string, subfeature, subvalues)
@bjam_signature((["composite_property_s"], ["component_properties_s", "*"]))
def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
"""
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature()
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite():
raise BaseException ("'%s' is not a composite feature" % f)
    if __composite_properties.has_key(composite_property):
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties
def expand_composite(property_):
if __debug__:
from .property import Property
assert isinstance(property_, Property)
result = [ property_ ]
if __composite_properties.has_key(property_):
for p in __composite_properties[property_]:
result.extend(expand_composite(p))
return result
@bjam_signature((['feature'], ['properties', '*']))
def get_values (feature, properties):
""" Returns all values of the given feature specified by the given property set.
"""
if feature[0] != '<':
feature = '<' + feature + '>'
result = []
for p in properties:
if get_grist (p) == feature:
result.append (replace_grist (p, ''))
return result
def free_features ():
""" Returns all free features.
"""
return __free_features
def expand_composites (properties):
""" Expand all composite properties in the set so that all components
are explicitly expressed.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature() for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature()
if f.free():
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature() == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name(), [r.value() for r in result if r.feature() == f] + [x.value()], p))
else:
result.append (x)
elif any(r.feature() == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value() for r in result if r.feature() == f], p, x.value()))
else:
result.append (x)
return result
# Uses Property
def is_subfeature_of (parent_property, f):
""" Return true iff f is an ordinary subfeature of the parent_property's
feature, or if f is a subfeature of the parent_property's feature
specific to the parent_property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(f, Feature)
if not f.subfeature():
return False
p = f.parent()
if not p:
return False
parent_feature = p[0]
parent_value = p[1]
if parent_feature != parent_property.feature():
return False
if parent_value and parent_value != parent_property.value():
return False
return True
def __is_subproperty_of (parent_property, p):
""" As is_subfeature_of, for subproperties.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(p, Property)
return is_subfeature_of (parent_property, p.feature())
# Returns true iff the subvalue is valid for the feature. When the
# optional value-string is provided, returns true iff the subvalues
# are valid for the given value of the feature.
def is_subvalue(feature, value_string, subfeature, subvalue):
assert isinstance(feature, basestring)
assert isinstance(value_string, basestring)
assert isinstance(subfeature, basestring)
assert isinstance(subvalue, basestring)
if not value_string:
value_string = ''
try:
return __subfeature_from_value[feature][value_string][subvalue] == subfeature
except KeyError:
return False
# Uses Property
def expand (properties):
""" Given a property set which may consist of composite and implicit
properties and combined subfeature values, returns an expanded,
normalized property set with all implicit features expressed
explicitly, all subfeature values individually expressed, and all
components of composite properties expanded. Non-free features
directly expressed in the input properties cause any values of
those features due to composite feature expansion to be dropped. If
two values of a given non-free feature are directly expressed in the
input, an error is issued.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
expanded = expand_subfeatures(properties)
return expand_composites (expanded)
# Accepts list of Property objects
def add_defaults (properties):
""" Given a set of properties, add default values for features not
represented in the set.
        Note: if there's an ordinary feature F1 and a composite feature
        F2 which includes some value for F1, and both features have default values,
        then the default value of F1 will be added, not the value in F2. This might
        not be the right idea: consider
feature variant : debug ... ;
<variant>debug : .... <runtime-debugging>on
feature <runtime-debugging> : off on ;
Here, when adding default for an empty property set, we'll get
<variant>debug <runtime_debugging>off
and that's kind of strange.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
result = [x for x in properties]
handled_features = set()
for p in properties:
# We don't add default for conditional properties. We don't want
        # <variant>debug:<define>DEBUG to be taken as a specified value for <variant>
if not p.condition():
handled_features.add(p.feature())
missing_top = [f for f in __all_top_features if not f in handled_features]
more = defaults(missing_top)
result.extend(more)
for p in more:
handled_features.add(p.feature())
# Add defaults for subfeatures of features which are present
for p in result[:]:
s = p.feature().subfeatures()
more = defaults([s for s in p.feature().subfeatures() if not s in handled_features])
for p in more:
handled_features.add(p.feature())
result.extend(more)
return result
def minimize (properties):
""" Given an expanded property set, eliminate all redundancy: properties
which are elements of other (composite) properties in the set will
be eliminated. Non-symmetric properties equal to default values will be
        eliminated, unless they override a value from some composite property.
Implicit properties will be expressed without feature
grist, and sub-property values will be expressed as elements joined
to the corresponding main property.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# remove properties implied by composite features
components = []
for property in properties:
if __composite_properties.has_key (property):
components.extend(__composite_properties[property])
properties = b2.util.set.difference (properties, components)
# handle subfeatures and implicit features
# move subfeatures to the end of the list
properties = [p for p in properties if not p.feature().subfeature()] +\
[p for p in properties if p.feature().subfeature()]
result = []
while properties:
p = properties[0]
f = p.feature()
# locate all subproperties of $(x[1]) in the property set
subproperties = __select_subproperties (p, properties)
if subproperties:
# reconstitute the joined property name
subproperties.sort ()
joined = b2.build.property.Property(p.feature(), p.value() + '-' + '-'.join ([sp.value() for sp in subproperties]))
result.append(joined)
properties = b2.util.set.difference(properties[1:], subproperties)
else:
# eliminate properties whose value is equal to feature's
# default and which are not symmetric and which do not
# contradict values implied by composite properties.
# since all component properties of composites in the set
# have been eliminated, any remaining property whose
# feature is the same as a component of a composite in the
# set must have a non-redundant value.
if p.value() != f.default() or f.symmetric():
result.append (p)
#\
#or get_grist (fullp) in get_grist (components):
# FIXME: restore above
properties = properties[1:]
return result
def split (properties):
""" Given a property-set of the form
v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
Returns
v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
Note that vN...vM may contain slashes. This is resilient to the
substitution of backslashes for slashes, since Jam, unbidden,
sometimes swaps slash direction on NT.
"""
assert isinstance(properties, basestring)
def split_one (properties):
pieces = re.split (__re_slash_or_backslash, properties)
result = []
for x in pieces:
if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
result = result [0:-1] + [ result [-1] + '/' + x ]
else:
result.append (x)
return result
if isinstance (properties, str):
return split_one (properties)
result = []
for p in properties:
result += split_one (p)
return result
def compress_subproperties (properties):
""" Combine all subproperties into their parent properties
Requires: for every subproperty, there is a parent property. All
features are explicitly expressed.
This rule probably shouldn't be needed, but
build-request.expand-no-defaults is being abused for unintended
purposes and it needs help
"""
from .property import Property
assert is_iterable_typed(properties, Property)
result = []
matched_subs = set()
all_subs = set()
for p in properties:
f = p.feature()
if not f.subfeature():
subs = __select_subproperties (p, properties)
if subs:
matched_subs.update(subs)
subvalues = '-'.join (sub.value() for sub in subs)
result.append(Property(
p.feature(), p.value() + '-' + subvalues,
p.condition()))
else:
result.append(p)
else:
all_subs.add(p)
    # TODO: these variables are used just for debugging. What's the overhead?
assert all_subs == matched_subs
return result
######################################################################################
# Private methods
def __select_subproperties (parent_property, properties):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(parent_property, Property)
return [ x for x in properties if __is_subproperty_of (parent_property, x) ]
def __get_subfeature_name (subfeature, value_string):
assert isinstance(subfeature, basestring)
assert isinstance(value_string, basestring) or value_string is None
if value_string == None:
prefix = ''
else:
prefix = value_string + ':'
return prefix + subfeature
def __validate_feature_attributes (name, attributes):
assert isinstance(name, basestring)
assert is_iterable_typed(attributes, basestring)
for attribute in attributes:
if not attribute in __all_attributes:
raise InvalidAttribute ("unknown attributes: '%s' in feature declaration: '%s'" % (str (b2.util.set.difference (attributes, __all_attributes)), name))
if name in __all_features:
raise AlreadyDefined ("feature '%s' already defined" % name)
elif 'implicit' in attributes and 'free' in attributes:
raise InvalidAttribute ("free features cannot also be implicit (in declaration of feature '%s')" % name)
elif 'free' in attributes and 'propagated' in attributes:
raise InvalidAttribute ("free features cannot also be propagated (in declaration of feature '%s')" % name)
def __validate_feature (feature):
""" Generates an error if the feature is unknown.
"""
assert isinstance(feature, basestring)
if not __all_features.has_key (feature):
raise BaseException ('unknown feature "%s"' % feature)
def __select_subfeatures (parent_property, features):
""" Given a property, return the subset of features consisting of all
ordinary subfeatures of the property's feature, and all specific
subfeatures of the property's feature which are conditional on the
property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert is_iterable_typed(features, Feature)
return [f for f in features if is_subfeature_of (parent_property, f)]
# FIXME: copy over tests.
| mit | -3,063,188,213,507,328,500 | 7,409,000,144,820,318,000 | 34.386792 | 162 | 0.635534 | false |
rockfruit/bika.lims | bika/lims/browser/analysisrequest/results_not_requested.py | 1 | 2747 | # This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.permissions import *
from bika.lims.browser.analysisrequest import AnalysisRequestManageResultsView
from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema
from bika.lims.utils import to_utf8
from bika.lims.workflow import doActionFor
from plone.app.layout.globals.interfaces import IViewView
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
import plone
class AnalysisRequestResultsNotRequestedView(AnalysisRequestManageResultsView):
implements(IViewView)
template = ViewPageTemplateFile("templates/analysisrequest_analyses_not_requested.pt")
def __call__(self):
ar = self.context
workflow = getToolByName(ar, 'portal_workflow')
        # If this is a retracted AR, show the link to the child AR and show a warning msg
if workflow.getInfoFor(ar, 'review_state') == 'invalid':
childar = hasattr(ar, 'getChildAnalysisRequest') \
and ar.getChildAnalysisRequest() or None
childid = childar and childar.getRequestID() or None
message = _('This Analysis Request has been withdrawn and is shown '
'for trace-ability purposes only. Retest: ${retest_child_id}.',
mapping={"retest_child_id":childid if childid else ''})
self.context.plone_utils.addPortalMessage(message, 'warning')
        # If this is an AR automatically generated due to a retraction, show its
        # parent AR information
if hasattr(ar, 'getParentAnalysisRequest') \
and ar.getParentAnalysisRequest():
par = ar.getParentAnalysisRequest()
message = _(
'This Analysis Request has been generated automatically due to '
'the retraction of the Analysis Request ${retracted_request_id}.',
mapping={"retracted_request_id": par.getRequestID()})
self.context.plone_utils.addPortalMessage(message, 'info')
can_do = getSecurityManager().checkPermission(ResultsNotRequested, ar)
if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
self.request.response.redirect(ar.absolute_url())
elif not(can_do):
self.request.response.redirect(ar.absolute_url())
else:
return self.template()
| agpl-3.0 | 411,061,567,741,681,540 | 4,704,386,051,509,816,000 | 46.362069 | 90 | 0.699672 | false |
qnib/QNIBCollect | src/diamond/collectors/traceroute/traceroute.py | 7 | 3878 | # coding=utf-8
"""
Collect icmp round trip times per hop
#### Dependencies
* libparistraceroute1 (as paris-traceroute)
"""
import re
import diamond.collector
from subprocess import Popen, PIPE
class TracerouteCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(TracerouteCollector, self).get_default_config_help()
config_help.update({
'bin': "The path to the tracerouting library.",
'destport': "The target port number",
'hosts': "Hosts to run the traceroute command on",
'protocol': "The protocol to use for the traceroute pings (icmp, udp, tcp)",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(TracerouteCollector, self).get_default_config()
config.update({
'path': 'traceroute',
'hosts': { "yelp":"yelp.com" },
'protocol': 'icmp',
})
return config
def collect(self):
protocol_args = self._protocol_config()
if not protocol_args:
self.log.error(
"Please specify a protocol for the traceroute,\n"
+ " options (icmp, tcp, udp)"
)
return None
for pseudo_hostname, address in self.config.get('hosts', {}).iteritems():
metric_name = '.'.join([
pseudo_hostname,
'RoundTripTime',
])
if 'bin' not in self.config:
self.log.error(
"Please specify the path of the canonical binary"
)
return None
cmd = [self.config['bin'], '-nq1', '-w1', protocol_args, address]
try:
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
errors = process.stderr.readline()
if errors:
self.log.error(
"Error running traceroute process: {0!s}".format(errors)
)
continue
while True:
line = process.stdout.readline()
if not line:
break
# A hop contains:
# hop, ip, rtt
# in that order.
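                    # Illustrative (made-up) samples of lines in that format:
                    #   "3  93.184.216.34  11.504ms"
                    # or, for a hop that did not reply:
                    #   "5  *"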
hop_data = line.split()
if not hop_data or len(hop_data) not in [2, 3]:
continue
hop_number = ip = None
rtt = 0
try:
[hop_number, ip, rtt_ms] = hop_data
rtt = re.match('([0-9\.]+)ms', rtt_ms).group(1)
except ValueError as e:
[hop_number, ip] = hop_data
if hop_number is None or ip is None:
continue
rtt = float(rtt)
self.dimensions = {
'hop': hop_number,
}
if '*' not in ip:
self.dimensions['ip'] = ip
self.publish(metric_name, rtt)
except Exception as e:
self.log.error(
"Error running TracerouteCollector: {0!s}".format(e)
)
continue
def _protocol_config(self):
protocol = self.config['protocol'].lower()
destport = self.config.get('destport', 80)
if protocol == 'udp':
protocol_args = '-U'
elif protocol == 'tcp':
protocol_args = '-Tp{0!s}'.format(destport)
elif protocol == 'icmp':
protocol_args = '-I'
else:
return None
return protocol_args
| apache-2.0 | -984,879,379,596,667,800 | -5,349,473,949,366,078,000 | 29.062016 | 92 | 0.462867 | false |
ArcherSys/ArcherSys | Lib/site-packages/pygments/lexers/modula2.py | 23 | 52564 | # -*- coding: utf-8 -*-
"""
pygments.lexers.modula2
~~~~~~~~~~~~~~~~~~~~~~~
Multi-Dialect Lexer for Modula-2.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Error
__all__ = ['Modula2Lexer']
# Multi-Dialect Modula-2 Lexer
class Modula2Lexer(RegexLexer):
"""
For `Modula-2 <http://www.modula2.org/>`_ source code.
The Modula-2 lexer supports several dialects. By default, it operates in
fallback mode, recognising the *combined* literals, punctuation symbols
and operators of all supported dialects, and the *combined* reserved words
and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not
differentiating between library defined identifiers.
To select a specific dialect, a dialect option may be passed
or a dialect tag may be embedded into a source file.
Dialect Options:
`m2pim`
Select PIM Modula-2 dialect.
`m2iso`
Select ISO Modula-2 dialect.
`m2r10`
Select Modula-2 R10 dialect.
`objm2`
Select Objective Modula-2 dialect.
The PIM and ISO dialect options may be qualified with a language extension.
Language Extensions:
`+aglet`
Select Aglet Modula-2 extensions, available with m2iso.
`+gm2`
Select GNU Modula-2 extensions, available with m2pim.
`+p1`
Select p1 Modula-2 extensions, available with m2iso.
`+xds`
Select XDS Modula-2 extensions, available with m2iso.
Passing a Dialect Option via Unix Commandline Interface
Dialect options may be passed to the lexer using the `dialect` key.
Only one such option should be passed. If multiple dialect options are
passed, the first valid option is used, any subsequent options are ignored.
Examples:
`$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`
Use ISO dialect to render input to HTML output
`$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`
Use ISO dialect with p1 extensions to render input to RTF output
Embedding a Dialect Option within a source file
A dialect option may be embedded in a source file in form of a dialect
tag, a specially formatted comment that specifies a dialect option.
Dialect Tag EBNF::
dialectTag :
OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;
dialectOption :
'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;
Prefix : '!' ;
OpeningCommentDelim : '(*' ;
ClosingCommentDelim : '*)' ;
No whitespace is permitted between the tokens of a dialect tag.
In the event that a source file contains multiple dialect tags, the first
tag that contains a valid dialect option will be used and any subsequent
dialect tags will be ignored. Ideally, a dialect tag should be placed
at the beginning of a source file.
An embedded dialect tag overrides a dialect option set via command line.
Examples:
``(*!m2r10*) DEFINITION MODULE Foobar; ...``
Use Modula2 R10 dialect to render this source file.
``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``
Use PIM dialect with GNU extensions to render this source file.
Algol Publication Mode:
In Algol publication mode, source text is rendered for publication of
algorithms in scientific papers and academic texts, following the format
of the Revised Algol-60 Language Report. It is activated by passing
one of two corresponding styles as an option:
`algol`
render reserved words lowercase underline boldface
and builtins lowercase boldface italic
`algol_nu`
render reserved words lowercase boldface (no underlining)
and builtins lowercase boldface italic
The lexer automatically performs the required lowercase conversion when
this mode is activated.
Example:
``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``
Render input file in Algol publication mode to LaTeX output.
Rendering Mode of First Class ADT Identifiers:
The rendering of standard library first class ADT identifiers is controlled
by option flag "treat_stdlib_adts_as_builtins".
When this option is turned on, standard library ADT identifiers are rendered
as builtins. When it is turned off, they are rendered as ordinary library
identifiers.
`treat_stdlib_adts_as_builtins` (default: On)
The option is useful for dialects that support ADTs as first class objects
and provide ADTs in the standard library that would otherwise be built-in.
At present, only Modula-2 R10 supports library ADTs as first class objects
and therefore, no ADT identifiers are defined for any other dialects.
Example:
``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``
Render standard library ADTs as ordinary library types.
.. versionadded:: 1.3
.. versionchanged:: 2.1
Added multi-dialect support.
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
filenames = ['*.def', '*.mod']
mimetypes = ['text/x-modula2']
flags = re.MULTILINE | re.DOTALL
tokens = {
'whitespace': [
(r'\n+', Text), # blank lines
(r'\s+', Text), # whitespace
],
'dialecttags': [
# PIM Dialect Tag
(r'\(\*!m2pim\*\)', Comment.Special),
# ISO Dialect Tag
(r'\(\*!m2iso\*\)', Comment.Special),
# M2R10 Dialect Tag
(r'\(\*!m2r10\*\)', Comment.Special),
# ObjM2 Dialect Tag
(r'\(\*!objm2\*\)', Comment.Special),
# Aglet Extensions Dialect Tag
(r'\(\*!m2iso\+aglet\*\)', Comment.Special),
# GNU Extensions Dialect Tag
(r'\(\*!m2pim\+gm2\*\)', Comment.Special),
# p1 Extensions Dialect Tag
(r'\(\*!m2iso\+p1\*\)', Comment.Special),
# XDS Extensions Dialect Tag
(r'\(\*!m2iso\+xds\*\)', Comment.Special),
],
'identifiers': [
(r'([a-zA-Z_$][\w$]*)', Name),
],
'prefixed_number_literals': [
#
# Base-2, whole number
(r'0b[01]+(\'[01]+)*', Number.Bin),
#
# Base-16, whole number
(r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex),
],
'plain_number_literals': [
#
# Base-10, real number with exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*' # fractional part
r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent
Number.Float),
#
# Base-10, real number without exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*', # fractional part
Number.Float),
#
# Base-10, whole number
(r'[0-9]+(\'[0-9]+)*', Number.Integer),
],
'suffixed_number_literals': [
#
# Base-8, whole number
(r'[0-7]+B', Number.Oct),
#
# Base-8, character code
(r'[0-7]+C', Number.Oct),
#
# Base-16, number
(r'[0-9A-F]+H', Number.Hex),
],
'string_literals': [
(r"'(\\\\|\\'|[^'])*'", String), # single quoted string
(r'"(\\\\|\\"|[^"])*"', String), # double quoted string
],
'digraph_operators': [
# Dot Product Operator
(r'\*\.', Operator),
# Array Concatenation Operator
(r'\+>', Operator), # M2R10 + ObjM2
# Inequality Operator
(r'<>', Operator), # ISO + PIM
# Less-Or-Equal, Subset
(r'<=', Operator),
# Greater-Or-Equal, Superset
(r'>=', Operator),
# Identity Operator
(r'==', Operator), # M2R10 + ObjM2
# Type Conversion Operator
(r'::', Operator), # M2R10 + ObjM2
# Assignment Symbol
(r':=', Operator),
# Postfix Increment Mutator
(r'\+\+', Operator), # M2R10 + ObjM2
# Postfix Decrement Mutator
(r'--', Operator), # M2R10 + ObjM2
],
'unigraph_operators': [
# Arithmetic Operators
(r'[+-]', Operator),
(r'[*/]', Operator),
# ISO 80000-2 compliant Set Difference Operator
(r'\\', Operator), # M2R10 + ObjM2
# Relational Operators
(r'[=#<>]', Operator),
# Dereferencing Operator
(r'\^', Operator),
# Dereferencing Operator Synonym
(r'@', Operator), # ISO
# Logical AND Operator Synonym
(r'&', Operator), # PIM + ISO
# Logical NOT Operator Synonym
(r'~', Operator), # PIM + ISO
# Smalltalk Message Prefix
(r'`', Operator), # ObjM2
],
'digraph_punctuation': [
# Range Constructor
(r'\.\.', Punctuation),
# Opening Chevron Bracket
(r'<<', Punctuation), # M2R10 + ISO
# Closing Chevron Bracket
(r'>>', Punctuation), # M2R10 + ISO
# Blueprint Punctuation
(r'->', Punctuation), # M2R10 + ISO
# Distinguish |# and # in M2 R10
(r'\|#', Punctuation),
# Distinguish ## and # in M2 R10
(r'##', Punctuation),
# Distinguish |* and * in M2 R10
(r'\|\*', Punctuation),
],
'unigraph_punctuation': [
# Common Punctuation
(r'[\(\)\[\]{},.:;\|]', Punctuation),
# Case Label Separator Synonym
(r'!', Punctuation), # ISO
# Blueprint Punctuation
(r'\?', Punctuation), # M2R10 + ObjM2
],
'comments': [
# Single Line Comment
(r'^//.*?\n', Comment.Single), # M2R10 + ObjM2
# Block Comment
(r'\(\*([^$].*?)\*\)', Comment.Multiline),
# Template Block Comment
(r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2
],
'pragmas': [
# ISO Style Pragmas
(r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2
# Pascal Style Pragmas
(r'\(\*\$.*?\*\)', Comment.Preproc), # PIM
],
'root': [
include('whitespace'),
include('dialecttags'),
include('pragmas'),
include('comments'),
include('identifiers'),
include('suffixed_number_literals'), # PIM + ISO
include('prefixed_number_literals'), # M2R10 + ObjM2
include('plain_number_literals'),
include('string_literals'),
include('digraph_punctuation'),
include('digraph_operators'),
include('unigraph_punctuation'),
include('unigraph_operators'),
]
}
# C o m m o n D a t a s e t s
# Common Reserved Words Dataset
common_reserved_words = (
# 37 common reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',
'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',
'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
)
# Common Builtins Dataset
common_builtins = (
# 16 common builtins
'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',
'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',
'TRUE',
)
# Common Pseudo-Module Builtins Dataset
common_pseudo_builtins = (
# 4 common pseudo builtins
'ADDRESS', 'BYTE', 'WORD', 'ADR'
)
# P I M M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for PIM Modula-2
pim_lexemes_to_reject = (
'!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.',
'+>', '->', '<<', '>>', '|#', '##',
)
# PIM Modula-2 Additional Reserved Words Dataset
pim_additional_reserved_words = (
# 3 additional reserved words
'EXPORT', 'QUALIFIED', 'WITH',
)
# PIM Modula-2 Additional Builtins Dataset
pim_additional_builtins = (
# 16 additional builtins
'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',
'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',
)
# PIM Modula-2 Additional Pseudo-Module Builtins Dataset
pim_additional_pseudo_builtins = (
# 5 additional pseudo builtins
'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',
)
# I S O M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for ISO Modula-2
iso_lexemes_to_reject = (
'`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->',
'<<', '>>', '|#', '##',
)
# ISO Modula-2 Additional Reserved Words Dataset
iso_additional_reserved_words = (
# 9 additional reserved words (ISO 10514-1)
'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',
'REM', 'RETRY', 'WITH',
# 10 additional reserved words (ISO 10514-2 & ISO 10514-3)
'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',
'REVEAL', 'TRACED', 'UNSAFEGUARDED',
)
# ISO Modula-2 Additional Builtins Dataset
iso_additional_builtins = (
# 26 additional builtins (ISO 10514-1)
'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',
'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH',
'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',
        'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
# 5 additional builtins (ISO 10514-2 & ISO 10514-3)
'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',
)
# ISO Modula-2 Additional Pseudo-Module Builtins Dataset
iso_additional_pseudo_builtins = (
# 14 additional builtins (SYSTEM)
'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',
'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',
'ROTATE', 'SHIFT', 'CAST', 'TSIZE',
# 13 additional builtins (COROUTINES)
'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',
'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',
'NEWCOROUTINE', 'PROT', 'TRANSFER',
# 9 additional builtins (EXCEPTIONS)
'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',
'ExceptionSource', 'GetMessage', 'IsCurrentSource',
'IsExceptionalExecution', 'RAISE',
# 3 additional builtins (TERMINATION)
'TERMINATION', 'IsTerminating', 'HasHalted',
# 4 additional builtins (M2EXCEPTION)
'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',
'indexException', 'rangeException', 'caseSelectException',
'invalidLocation', 'functionException', 'wholeValueException',
'wholeDivException', 'realValueException', 'realDivException',
'complexValueException', 'complexDivException', 'protException',
'sysException', 'coException', 'exException',
)
# M o d u l a - 2 R 1 0 D a t a s e t s
# Lexemes to Mark as Error Tokens for Modula-2 R10
m2r10_lexemes_to_reject = (
'!', '`', '@', '$', '%', '&', '<>',
)
# Modula-2 R10 reserved words in addition to the common set
m2r10_additional_reserved_words = (
# 12 additional reserved words
'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',
'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',
# 2 additional reserved words with symbolic assembly option
'ASM', 'REG',
)
# Modula-2 R10 builtins in addition to the common set
m2r10_additional_builtins = (
# 26 additional builtins
'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',
'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',
'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',
'UNICHAR', 'WRITE', 'WRITEF',
)
# Modula-2 R10 Additional Pseudo-Module Builtins Dataset
m2r10_additional_pseudo_builtins = (
# 13 additional builtins (TPROPERTIES)
'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',
'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',
'TMAXEXP', 'TMINEXP',
# 4 additional builtins (CONVERSION)
'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',
# 35 additional builtins (UNSAFE)
'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',
'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',
'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',
'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',
'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',
# 11 additional builtins (ATOMIC)
'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',
'BWNAND', 'BWOR', 'BWXOR',
# 7 additional builtins (COMPILER)
'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',
'HASH',
# 5 additional builtins (ASSEMBLER)
'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',
)
# O b j e c t i v e M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for Objective Modula-2
objm2_lexemes_to_reject = (
'!', '$', '%', '&', '<>',
)
# Objective Modula-2 Extensions
# reserved words in addition to Modula-2 R10
objm2_additional_reserved_words = (
# 16 additional reserved words
'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
'SUPER', 'TRY',
)
# Objective Modula-2 Extensions
# builtins in addition to Modula-2 R10
objm2_additional_builtins = (
# 3 additional builtins
'OBJECT', 'NO', 'YES',
)
# Objective Modula-2 Extensions
# pseudo-module builtins in addition to Modula-2 R10
objm2_additional_pseudo_builtins = (
# None
)
# A g l e t M o d u l a - 2 D a t a s e t s
# Aglet Extensions
# reserved words in addition to ISO Modula-2
aglet_additional_reserved_words = (
# None
)
# Aglet Extensions
# builtins in addition to ISO Modula-2
aglet_additional_builtins = (
# 9 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',
)
# Aglet Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
aglet_additional_pseudo_builtins = (
# None
)
# G N U M o d u l a - 2 D a t a s e t s
# GNU Extensions
# reserved words in addition to PIM Modula-2
gm2_additional_reserved_words = (
# 10 additional reserved words
'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
'__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
)
# GNU Extensions
# builtins in addition to PIM Modula-2
gm2_additional_builtins = (
# 21 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
)
# GNU Extensions
# pseudo-module builtins in addition to PIM Modula-2
gm2_additional_pseudo_builtins = (
# None
)
# p 1 M o d u l a - 2 D a t a s e t s
# p1 Extensions
# reserved words in addition to ISO Modula-2
p1_additional_reserved_words = (
# None
)
# p1 Extensions
# builtins in addition to ISO Modula-2
p1_additional_builtins = (
# None
)
# p1 Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
p1_additional_pseudo_builtins = (
# 1 additional builtin
'BCD',
)
# X D S M o d u l a - 2 D a t a s e t s
# XDS Extensions
# reserved words in addition to ISO Modula-2
xds_additional_reserved_words = (
# 1 additional reserved word
'SEQ',
)
# XDS Extensions
# builtins in addition to ISO Modula-2
xds_additional_builtins = (
# 9 additional builtins
'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',
'LONGCARD', 'SHORTCARD', 'SHORTINT',
)
# XDS Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
xds_additional_pseudo_builtins = (
# 22 additional builtins (SYSTEM)
'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',
'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',
'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void'
# 3 additional builtins (COMPILER)
'COMPILER', 'OPTION', 'EQUATION'
)
# P I M S t a n d a r d L i b r a r y D a t a s e t s
# PIM Modula-2 Standard Library Modules Dataset
pim_stdlib_module_identifiers = (
'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',
)
# PIM Modula-2 Standard Library Types Dataset
pim_stdlib_type_identifiers = (
'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',
'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',
'DirectoryCommand',
)
# PIM Modula-2 Standard Library Procedures Dataset
pim_stdlib_proc_identifiers = (
'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',
'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',
'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',
'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',
'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',
'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',
'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',
'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',
'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',
'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',
)
# PIM Modula-2 Standard Library Variables Dataset
pim_stdlib_var_identifiers = (
'Done', 'termCH', 'in', 'out'
)
# PIM Modula-2 Standard Library Constants Dataset
pim_stdlib_const_identifiers = (
'EOL',
)
# I S O S t a n d a r d L i b r a r y D a t a s e t s
# ISO Modula-2 Standard Library Modules Dataset
iso_stdlib_module_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Types Dataset
iso_stdlib_type_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Procedures Dataset
iso_stdlib_proc_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Variables Dataset
iso_stdlib_var_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Constants Dataset
iso_stdlib_const_identifiers = (
# TO DO
)
# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s
# Modula-2 R10 Standard Library ADTs Dataset
m2r10_stdlib_adt_identifiers = (
'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',
'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',
'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',
'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',
'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',
'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',
'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',
'INT64', 'INT128', 'STRING', 'UNISTRING',
)
# Modula-2 R10 Standard Library Blueprints Dataset
m2r10_stdlib_blueprint_identifiers = (
'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',
'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',
)
# Modula-2 R10 Standard Library Modules Dataset
m2r10_stdlib_module_identifiers = (
'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',
'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',
'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',
'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',
'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',
)
# Modula-2 R10 Standard Library Types Dataset
m2r10_stdlib_type_identifiers = (
'File', 'Status',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Procedures Dataset
m2r10_stdlib_proc_identifiers = (
'ALLOCATE', 'DEALLOCATE', 'SIZE',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Variables Dataset
m2r10_stdlib_var_identifiers = (
'stdIn', 'stdOut', 'stdErr',
)
# Modula-2 R10 Standard Library Constants Dataset
m2r10_stdlib_const_identifiers = (
'pi', 'tau',
)
# D i a l e c t s
# Dialect modes
dialects = (
'unknown',
'm2pim', 'm2iso', 'm2r10', 'objm2',
'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
)
# D a t a b a s e s
# Lexemes to Mark as Errors Database
lexemes_to_reject_db = {
# Lexemes to reject for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Lexemes to reject for PIM Modula-2
'm2pim': (
pim_lexemes_to_reject,
),
# Lexemes to reject for ISO Modula-2
'm2iso': (
iso_lexemes_to_reject,
),
# Lexemes to reject for Modula-2 R10
'm2r10': (
m2r10_lexemes_to_reject,
),
# Lexemes to reject for Objective Modula-2
'objm2': (
objm2_lexemes_to_reject,
),
# Lexemes to reject for Aglet Modula-2
'm2iso+aglet': (
iso_lexemes_to_reject,
),
# Lexemes to reject for GNU Modula-2
'm2pim+gm2': (
pim_lexemes_to_reject,
),
# Lexemes to reject for p1 Modula-2
'm2iso+p1': (
iso_lexemes_to_reject,
),
# Lexemes to reject for XDS Modula-2
'm2iso+xds': (
iso_lexemes_to_reject,
),
}
# Reserved Words Database
reserved_words_db = {
# Reserved words for unknown dialect
'unknown': (
common_reserved_words,
pim_additional_reserved_words,
iso_additional_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for PIM Modula-2
'm2pim': (
common_reserved_words,
pim_additional_reserved_words,
),
# Reserved words for ISO Modula-2
'm2iso': (
common_reserved_words,
iso_additional_reserved_words,
),
# Reserved words for Modula-2 R10
'm2r10': (
common_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for Objective Modula-2
'objm2': (
common_reserved_words,
m2r10_additional_reserved_words,
objm2_additional_reserved_words,
),
# Reserved words for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_reserved_words,
iso_additional_reserved_words,
aglet_additional_reserved_words,
),
# Reserved words for GNU Modula-2 Extensions
'm2pim+gm2': (
common_reserved_words,
pim_additional_reserved_words,
gm2_additional_reserved_words,
),
# Reserved words for p1 Modula-2 Extensions
'm2iso+p1': (
common_reserved_words,
iso_additional_reserved_words,
p1_additional_reserved_words,
),
# Reserved words for XDS Modula-2 Extensions
'm2iso+xds': (
common_reserved_words,
iso_additional_reserved_words,
xds_additional_reserved_words,
),
}
# Builtins Database
builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_builtins,
pim_additional_builtins,
iso_additional_builtins,
m2r10_additional_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_builtins,
pim_additional_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_builtins,
iso_additional_builtins,
),
# Builtins for Modula-2 R10
'm2r10': (
common_builtins,
m2r10_additional_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_builtins,
m2r10_additional_builtins,
objm2_additional_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_builtins,
iso_additional_builtins,
aglet_additional_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_builtins,
pim_additional_builtins,
gm2_additional_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_builtins,
iso_additional_builtins,
p1_additional_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_builtins,
iso_additional_builtins,
xds_additional_builtins,
),
}
# Pseudo-Module Builtins Database
pseudo_builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
iso_additional_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
),
# Builtins for Modula-2 R10
'm2r10': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
objm2_additional_pseudo_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
aglet_additional_pseudo_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
gm2_additional_pseudo_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
p1_additional_pseudo_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
xds_additional_pseudo_builtins,
),
}
# Standard Library ADTs Database
stdlib_adts_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library ADTs for PIM Modula-2
'm2pim': (
# No first class library types
),
# Standard Library ADTs for ISO Modula-2
'm2iso': (
# No first class library types
),
# Standard Library ADTs for Modula-2 R10
'm2r10': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Objective Modula-2
'objm2': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Aglet Modula-2
'm2iso+aglet': (
# No first class library types
),
# Standard Library ADTs for GNU Modula-2
'm2pim+gm2': (
# No first class library types
),
# Standard Library ADTs for p1 Modula-2
'm2iso+p1': (
# No first class library types
),
# Standard Library ADTs for XDS Modula-2
'm2iso+xds': (
# No first class library types
),
}
# Standard Library Modules Database
stdlib_modules_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Modules for PIM Modula-2
'm2pim': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for ISO Modula-2
'm2iso': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for Modula-2 R10
'm2r10': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
m2r10_stdlib_adt_identifiers,
),
# Standard Library Modules for Objective Modula-2
'objm2': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
),
# Standard Library Modules for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for p1 Modula-2
'm2iso+p1': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for XDS Modula-2
'm2iso+xds': (
iso_stdlib_module_identifiers,
),
}
# Standard Library Types Database
stdlib_types_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Types for PIM Modula-2
'm2pim': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for ISO Modula-2
'm2iso': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for Modula-2 R10
'm2r10': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Objective Modula-2
'objm2': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for p1 Modula-2
'm2iso+p1': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for XDS Modula-2
'm2iso+xds': (
iso_stdlib_type_identifiers,
),
}
# Standard Library Procedures Database
stdlib_procedures_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Procedures for PIM Modula-2
'm2pim': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for ISO Modula-2
'm2iso': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for Modula-2 R10
'm2r10': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Objective Modula-2
'objm2': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for p1 Modula-2
'm2iso+p1': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for XDS Modula-2
'm2iso+xds': (
iso_stdlib_proc_identifiers,
),
}
# Standard Library Variables Database
stdlib_variables_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Variables for PIM Modula-2
'm2pim': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for ISO Modula-2
'm2iso': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for Modula-2 R10
'm2r10': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Objective Modula-2
'objm2': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for p1 Modula-2
'm2iso+p1': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for XDS Modula-2
'm2iso+xds': (
iso_stdlib_var_identifiers,
),
}
# Standard Library Constants Database
stdlib_constants_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Constants for PIM Modula-2
'm2pim': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for ISO Modula-2
'm2iso': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for Modula-2 R10
'm2r10': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Objective Modula-2
'objm2': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for p1 Modula-2
'm2iso+p1': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for XDS Modula-2
'm2iso+xds': (
iso_stdlib_const_identifiers,
),
}
# M e t h o d s
# initialise a lexer instance
def __init__(self, **options):
#
# check dialect options
#
dialects = get_list_opt(options, 'dialect', [])
#
for dialect_option in dialects:
if dialect_option in self.dialects[1:-1]:
# valid dialect option found
self.set_dialect(dialect_option)
break
#
# Fallback Mode (DEFAULT)
else:
# no valid dialect option
self.set_dialect('unknown')
#
self.dialect_set_by_tag = False
#
# check style options
#
styles = get_list_opt(options, 'style', [])
#
# use lowercase mode for Algol style
if 'algol' in styles or 'algol_nu' in styles:
self.algol_publication_mode = True
else:
self.algol_publication_mode = False
#
# Check option flags
#
self.treat_stdlib_adts_as_builtins = get_bool_opt(
options, 'treat_stdlib_adts_as_builtins', True)
#
# call superclass initialiser
RegexLexer.__init__(self, **options)
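# A minimal usage sketch (assumption: the enclosing class is the Pygments
# Modula2Lexer; everything below is illustrative, not part of this module):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     lexer = Modula2Lexer(dialect='m2pim+gm2')
#     print(highlight(source_code, lexer, TerminalFormatter()))
#
# With no valid 'dialect' option, the fallback branch above selects 'unknown'.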
# Set lexer to a specified dialect
def set_dialect(self, dialect_id):
#
# if __debug__:
# print 'entered set_dialect with arg: ', dialect_id
#
# check dialect name against known dialects
if dialect_id not in self.dialects:
dialect = 'unknown' # default
else:
dialect = dialect_id
#
# compose lexemes to reject set
lexemes_to_reject_set = set()
# add each list of reject lexemes for this dialect
for list in self.lexemes_to_reject_db[dialect]:
lexemes_to_reject_set.update(set(list))
#
# compose reserved words set
reswords_set = set()
# add each list of reserved words for this dialect
for list in self.reserved_words_db[dialect]:
reswords_set.update(set(list))
#
# compose builtins set
builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.builtins_db[dialect]:
builtins_set.update(set(list).difference(reswords_set))
#
# compose pseudo-builtins set
pseudo_builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.pseudo_builtins_db[dialect]:
pseudo_builtins_set.update(set(list).difference(reswords_set))
#
# compose ADTs set
adts_set = set()
# add each list of ADTs for this dialect excluding reserved words
for list in self.stdlib_adts_db[dialect]:
adts_set.update(set(list).difference(reswords_set))
#
# compose modules set
modules_set = set()
# add each list of builtins for this dialect excluding builtins
for list in self.stdlib_modules_db[dialect]:
modules_set.update(set(list).difference(builtins_set))
#
# compose types set
types_set = set()
# add each list of types for this dialect excluding builtins
for list in self.stdlib_types_db[dialect]:
types_set.update(set(list).difference(builtins_set))
#
# compose procedures set
procedures_set = set()
# add each list of procedures for this dialect excluding builtins
for list in self.stdlib_procedures_db[dialect]:
procedures_set.update(set(list).difference(builtins_set))
#
# compose variables set
variables_set = set()
# add each list of variables for this dialect excluding builtins
for list in self.stdlib_variables_db[dialect]:
variables_set.update(set(list).difference(builtins_set))
#
# compose constants set
constants_set = set()
# add each list of constants for this dialect excluding builtins
for list in self.stdlib_constants_db[dialect]:
constants_set.update(set(list).difference(builtins_set))
#
# update lexer state
self.dialect = dialect
self.lexemes_to_reject = lexemes_to_reject_set
self.reserved_words = reswords_set
self.builtins = builtins_set
self.pseudo_builtins = pseudo_builtins_set
self.adts = adts_set
self.modules = modules_set
self.types = types_set
self.procedures = procedures_set
self.variables = variables_set
self.constants = constants_set
#
# if __debug__:
# print 'exiting set_dialect'
# print ' self.dialect: ', self.dialect
# print ' self.lexemes_to_reject: ', self.lexemes_to_reject
# print ' self.reserved_words: ', self.reserved_words
# print ' self.builtins: ', self.builtins
# print ' self.pseudo_builtins: ', self.pseudo_builtins
# print ' self.adts: ', self.adts
# print ' self.modules: ', self.modules
# print ' self.types: ', self.types
# print ' self.procedures: ', self.procedures
# print ' self.variables: ', self.variables
# print ' self.types: ', self.types
# print ' self.constants: ', self.constants
# Extracts a dialect name from a dialect tag comment string and checks
# the extracted name against known dialects. If a match is found, the
# matching name is returned, otherwise dialect id 'unknown' is returned
def get_dialect_from_dialect_tag(self, dialect_tag):
#
# if __debug__:
# print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag
#
# constants
left_tag_delim = '(*!'
right_tag_delim = '*)'
left_tag_delim_len = len(left_tag_delim)
right_tag_delim_len = len(right_tag_delim)
indicator_start = left_tag_delim_len
indicator_end = -(right_tag_delim_len)
#
# check comment string for dialect indicator
if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \
and dialect_tag.startswith(left_tag_delim) \
and dialect_tag.endswith(right_tag_delim):
#
# if __debug__:
# print 'dialect tag found'
#
# extract dialect indicator
indicator = dialect_tag[indicator_start:indicator_end]
#
# if __debug__:
# print 'extracted: ', indicator
#
# check against known dialects
for index in range(1, len(self.dialects)):
#
# if __debug__:
# print 'dialects[', index, ']: ', self.dialects[index]
#
if indicator == self.dialects[index]:
#
# if __debug__:
# print 'matching dialect found'
#
# indicator matches known dialect
return indicator
else:
# indicator does not match any dialect
return 'unknown' # default
else:
# invalid indicator string
return 'unknown' # default
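# Example (illustrative): for the comment string '(*!m2iso+p1*)' the
# indicator 'm2iso+p1' is extracted and returned because it is a known
# dialect; '(*!m2foo*)' falls through and yields 'unknown'.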
# intercept the token stream, modify token attributes and return them
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
#
# check for dialect tag if dialect has not been set by tag
if not self.dialect_set_by_tag and token == Comment.Special:
indicated_dialect = self.get_dialect_from_dialect_tag(value)
if indicated_dialect != 'unknown':
# token is a dialect indicator
# reset reserved words and builtins
self.set_dialect(indicated_dialect)
self.dialect_set_by_tag = True
#
# check for reserved words, predefined and stdlib identifiers
if token is Name:
if value in self.reserved_words:
token = Keyword.Reserved
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.builtins:
token = Name.Builtin
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.pseudo_builtins:
token = Name.Builtin.Pseudo
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.adts:
if not self.treat_stdlib_adts_as_builtins:
token = Name.Namespace
else:
token = Name.Builtin.Pseudo
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.modules:
token = Name.Namespace
#
elif value in self.types:
token = Name.Class
#
elif value in self.procedures:
token = Name.Function
#
elif value in self.variables:
token = Name.Variable
#
elif value in self.constants:
token = Name.Constant
#
elif token in Number:
#
# mark prefix number literals as error for PIM and ISO dialects
if self.dialect not in ('unknown', 'm2r10', 'objm2'):
if "'" in value or value[0:2] in ('0b', '0x', '0u'):
token = Error
#
elif self.dialect in ('m2r10', 'objm2'):
# mark base-8 number literals as errors for M2 R10 and ObjM2
if token is Number.Oct:
token = Error
# mark suffix base-16 literals as errors for M2 R10 and ObjM2
elif token is Number.Hex and 'H' in value:
token = Error
# mark real numbers with E as errors for M2 R10 and ObjM2
elif token is Number.Float and 'E' in value:
token = Error
#
elif token in Comment:
#
# mark single line comment as error for PIM and ISO dialects
if token is Comment.Single:
if self.dialect not in ('unknown', 'm2r10', 'objm2'):
token = Error
#
if token is Comment.Preproc:
# mark ISO pragma as error for PIM dialects
if value.startswith('<*') and \
self.dialect.startswith('m2pim'):
token = Error
# mark PIM pragma as comment for other dialects
elif value.startswith('(*$') and \
self.dialect != 'unknown' and \
not self.dialect.startswith('m2pim'):
token = Comment.Multiline
#
else: # token is neither Name nor Comment
#
# mark lexemes matching the dialect's error token set as errors
if value in self.lexemes_to_reject:
token = Error
#
# substitute lexemes when in Algol mode
if self.algol_publication_mode:
if value == '#':
value = u'≠'
elif value == '<=':
value = u'≤'
elif value == '>=':
value = u'≥'
elif value == '==':
value = u'≡'
elif value == '*.':
value = u'•'
# return result
yield index, token, value
| mit | -8,989,556,559,358,980,000 | 3,433,894,151,051,534,300 | 32.66688 | 84 | 0.541786 | false |
LongSeanSilvr/DC_Metro_Tracker | development_version/src/general_intents.py | 1 | 1923 | import build_response as br
# ======================================================================================================================
# Skill Behavior: Welcome Response
# ======================================================================================================================
class Welcome(object):
def __init__(self):
self.card_title = "Welcome"
self.reprompt_text = "What station would you like train times for?"
self.flag = "welcome"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Help
# ======================================================================================================================
class Help(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Help"
self.reprompt_text = "What station would you like train times for?"
self.flag = "help"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Quit
# ======================================================================================================================
class Exit(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Exiting"
self.flag = "exit"
def build_response(self):
output = br.build_response(self.card_title, self.flag)
return output
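# Illustrative sketch only (the intent names and dispatch table are
# assumptions, not part of this module): a request handler could map
# Alexa intent names to these classes, e.g.
#
#     intent_classes = {"AMAZON.HelpIntent": Help, "AMAZON.StopIntent": Exit}
#     response = intent_classes[intent["name"]](intent, session).build_response()
#
# while Welcome() takes no arguments and serves the launch request.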
| gpl-3.0 | -1,623,422,443,279,353,300 | -7,059,054,311,856,244,000 | 44.785714 | 120 | 0.411856 | false |
jaredculp/faker | faker/providers/address/es/__init__.py | 15 | 3305 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
## List of Countries https://www.un.org/es/members/
countries = (
'Afganistán', 'Albania', 'Alemania', 'Andorra', 'Angola',
'Antigua y Barbuda', 'Arabia Saudita', 'Argelia', 'Argentina',
'Armenia', 'Australia', 'Austria', 'Azerbaiyán',
'Bahamas', 'Bahrein', 'Bangladesh', 'Barbados', 'Belarús',
'Bélgica', 'Belice', 'Benin', 'Bhután', 'Bolivia',
'Bosnia y Herzegovina', 'Botswana', 'Brasil', 'Brunei Darussalam',
'Bulgaria', 'Burkina Faso', 'Burundi', 'Cabo Verde', 'Camboya',
'Camerún', 'Canadá', 'Chad', 'Chile', 'China', 'Chipre','Colombia',
'Comoras', 'Congo', 'Costa Rica', 'Côte d\'Ivoire', 'Croacia',
'Cuba', 'Dinamarca', 'Djibouti', 'Dominicana', 'Ecuador', 'Egipto',
'El Salvador', 'Emiratos Árabes Unidos', 'Eritrea', 'Eslovaquia',
'Eslovenia', 'España', 'Estados Unidos de América', 'Estonia',
'Etiopía', 'ex República Yugoslava de Macedonia',
'Federación de Rusia', 'Fiji', 'Filipinas', 'Finlandia', 'Francia',
'Gabón', 'Gambia', 'Georgia', 'Ghana', 'Granada', 'Grecia',
'Guatemala', 'Guinea', 'Guinea Bissau', 'Guinea Ecuatorial',
'Guyana', 'Haití', 'Honduras', 'Hungría', 'India', 'Indonesia',
'Irán', 'Iraq', 'Irlanda', 'Islandia', 'Islas Marshall',
'Islas Salomón', 'Israel', 'Italia', 'Jamaica', 'Japón',
'Jordania', 'Kazajstán', 'Kenya', 'Kirguistán', 'Kiribati',
'Kuwait', 'Lesotho', 'Letonia', 'Líbano', 'Liberia', 'Libia',
'Liechtenstein', 'Lituania', 'Luxemburgo', 'Madagascar',
'Malasia', 'Malawi', 'Maldivas', 'Mali', 'Malta','Marruecos',
'Mauricio', 'Mauritania', 'México', 'Micronesia', 'Mónaco',
'Mongolia', 'Montenegro','Mozambique','Myanmar', 'Namibia',
'Nauru', 'Nicaragua', 'Niger', 'Nigeria', 'Noruega',
'Nueva Zelandia', 'Omán', 'Países Bajos', 'Pakistán', 'Palau',
'Panamá', 'Papua Nueva Guinea', 'Paraguay', 'Perú', 'Polonia',
'Portugal', 'Qatar',
'Reino Unido de Gran Bretaña e Irlanda del Norte',
'República Árabe Siria', 'República Centroafricana',
'República Checa', 'República de Corea', 'República de Moldova',
'República Democrática del Congo',
'República Democrática Popular Lao',
'República Dominicana',
'República Federal Democrática de Nepal',
'República Popular Democrática de Corea',
'República Unida de Tanzanía', 'Rumania', 'Rwanda',
'Saint Kitts y Nevis', 'Samoa', 'San Marino', 'Santa Lucía',
'Santo Tomé y Príncipe', 'San Vicente y las Granadinas',
'Senegal', 'Serbia', 'Seychelles', 'Sierra Leona', 'Singapur',
'Somalia', 'Sri Lanka', 'Sudáfrica', 'Sudán', 'Sudán del Sur',
'Suecia', 'Suiza', 'Suriname', 'Swazilandia', 'Tailandia',
'Tayikistán', 'Timor-Leste', 'Togo', 'Tonga', 'Trinidad y Tabago',
'Túnez', 'Turkmenistán', 'Turquía', 'Tuvalu', 'Ucrania', 'Uganda',
'Uruguay', 'Uzbekistán', 'Vanuatu', 'Venezuela', 'Vietman',
'Yemen', 'Zambia', 'Zimbabwe'
)
| mit | -468,506,114,970,362,940 | -5,574,708,559,771,657,000 | 55.947368 | 75 | 0.599199 | false |
GNOME/libgxps | regtest/TestReferences.py | 1 | 3535 | # TestReferences.py
#
# Copyright (C) 2011 Carlos Garcia Campos <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import errno
from Test import Test
from Config import Config
from Printer import get_printer
from Utils import get_document_paths_from_dir, get_skipped_tests
from Queue import Queue
from threading import Thread, RLock
class TestReferences:
def __init__(self, docsdir, refsdir):
self._docsdir = docsdir
self._refsdir = refsdir
self._skipped = get_skipped_tests(docsdir)
self._test = Test()
self.config = Config()
self.printer = get_printer()
self._total_tests = 1
self._n_tests = 0
self._queue = Queue()
self._lock = RLock()
try:
os.makedirs(self._refsdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
def create_refs_for_file(self, filename):
if filename in self._skipped:
with self._lock:
self._n_tests += 1
self.printer.print_default("Skipping test '%s'" % (os.path.join(self._docsdir, filename)))
return
refs_path = os.path.join(self._refsdir, filename)
try:
os.makedirs(refs_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
doc_path = os.path.join(self._docsdir, filename)
if not self.config.force and self._test.has_results(refs_path):
with self._lock:
self._n_tests += 1
self.printer.print_default("Results found, skipping '%s'" % doc_path)
return
if self._test.create_refs(doc_path, refs_path):
self._test.create_checksums(refs_path, self.config.checksums_only)
with self._lock:
self._n_tests += 1
self.printer.printout_ln("[%d/%d] %s: done" % (self._n_tests, self._total_tests, doc_path))
def _worker_thread(self):
while True:
doc = self._queue.get()
self.create_refs_for_file(doc)
self._queue.task_done()
def create_refs(self):
docs, total_docs = get_document_paths_from_dir(self._docsdir)
self._total_tests = total_docs
self.printer.printout_ln('Found %d documents' % (total_docs))
self.printer.printout_ln('Process %d using %d worker threads' % (os.getpid(), self.config.threads))
self.printer.printout_ln()
self.printer.printout('Spawning %d workers...' % (self.config.threads))
for n_thread in range(self.config.threads):
thread = Thread(target=self._worker_thread)
thread.daemon = True
thread.start()
for doc in docs:
self._queue.put(doc)
self._queue.join()
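# Hypothetical driver (paths are illustrative only):
#
#     refs = TestReferences('/path/to/docs', '/path/to/refs')
#     refs.create_refs()
#
# which enumerates the documents once, then lets the daemon worker threads
# drain the shared queue of file names.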
| lgpl-2.1 | 1,319,300,266,403,067,000 | -1,350,422,978,098,173,000 | 32.990385 | 107 | 0.614427 | false |
HyechurnJang/archon | archon/view/core.py | 2 | 10791 | # -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import re
import uuid
class TAG(dict):
@classmethod
def ATTR(cls, attrs, **sets):
for k in sets: attrs[k] = '%s %s' % (sets[k], attrs[k]) if k in attrs else sets[k]
return attrs
@classmethod
def UUID(cls):
return 'V-' + str(uuid.uuid4())
def __init__(self, tag, **attrs):
dict.__init__(self, tag=tag, elems=[], attrs=attrs)
def __len__(self, *args, **kwargs):
return self['elems'].__len__()
def __str__(self):
return self.render()
def click(self, url):
if 'CLASS' in self['attrs']: self['attrs']['CLASS'] += ' clickable'
else: self['attrs']['CLASS'] = 'clickable'
self['attrs']['onclick'] = "GetData('%s');" % url
return self
def html(self, *elems):
for elem in elems: self['elems'].append(elem)
return self
def empty(self):
return not self['elems'].__len__()
def render(self):
tag = self['tag']
attrs = self['attrs']
elems = self['elems']
attr_str = '';
for k in attrs: attr_str += ' %s="%s"' % (k, attrs[k])
elem_str = ''
for elem in elems: elem_str += str(elem)
return '<%s%s>%s</%s>' % (tag, attr_str, elem_str, tag)
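# Quick illustration (tag names and content are made up): build a fragment
# and render it to a string.
#
#     row = TAG('tr').html(TAG('td').html('cell'))
#     str(row)  # -> '<tr><td>cell</td></tr>'
#
# The subclasses below simply fix the tag name and default attributes.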
class DIV(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'div', **attrs)
class SPAN(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'span', **attrs)
class HEAD(TAG):
def __init__(self, level, **attrs): TAG.__init__(self, 'h' + str(level), **attrs)
class PARA(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'p', **TAG.ATTR(attrs, CLASS='para'))
class ANCH(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'a', **attrs)
class LABEL(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'label', **attrs)
class STRONG(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'strong', **attrs)
class SMALL(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'small', **attrs)
class IMG(TAG):
def __init__(self, src, **attrs): TAG.__init__(self, 'img', **TAG.ATTR(attrs, src=src))
class ICON(TAG):
def __init__(self, icon, **attrs): TAG.__init__(self, 'i', **TAG.ATTR(attrs, CLASS='fa fa-%s' % icon))
class THEAD(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'thead', **attrs)
class TBODY(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'tbody', **attrs)
class TH(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'th', **attrs)
class TR(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'tr', **attrs)
class TD(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'td', **attrs)
class TABLE(TAG):
class BASIC(TAG):
def __init__(self, *heads, **options):
TAG.__init__(self, 'TABLE', ID=TAG.UUID(), CLASS='table table-bordered table-hover', LIB='table_basic', **{'width':'100%'})
self.body = TBODY()
self['options'] = options
tr = TR()
order = [None for i in range(0, len(heads))]
for i in range(0, len(heads)):
head = heads[i]
kv = re.match('.+\<(?P<p>\d+)(?P<d>(\+|\-))\>$', head)
if kv:
p = int(kv.group('p'))
d = kv.group('d')
if d == '+': order[p] = [i, 'asc']
else: order[p] = [i, 'desc']
head = head.replace('<%d%s>' % (p, d), '')
tr.html(TH().html(head))
order = filter(None, order)
if order: self['options']['order'] = order
else: self['options']['order'] = [[0, 'asc']]
self.html(THEAD().html(tr)).html(self.body)
def Record(self, *cols, **attrs):
tr = TR(**attrs)
for col in cols: tr.html(TD().html(col))
self.body.html(tr)
return self
def __len__(self, *args, **kwargs):
return self.body.__len__()
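# Illustrative use (headers and data are made up):
#
#     t = TABLE.BASIC('Name', 'Hits<0->')
#     t.Record('alpha', 12).Record('beta', 7)
#
# The '<0->' suffix gives that column top sort priority, descending.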
class ASYNC(TAG):
@classmethod
def pageview(cls):
def wrapper(view):
def decofunc(r, m, v):
r.Draw = int(r.Query['draw'][0]) if isinstance(r.Query['draw'], list) else int(r.Query['draw'])
r.Length = int(r.Query['length'][0]) if isinstance(r.Query['length'], list) else int(r.Query['length'])
r.Start = int(r.Query['start'][0]) if isinstance(r.Query['start'], list) else int(r.Query['start'])
try:
r.OrderCol = int(r.Query['order[0][column]'][0]) if isinstance(r.Query['order[0][column]'], list) else int(r.Query['order[0][column]'])
r.OrderDir = int(r.Query['order[0][dir]'][0]) if isinstance(r.Query['order[0][dir]'], list) else int(r.Query['order[0][dir]'])
r.Search = int(r.Query['search[value]'][0]) if isinstance(r.Query['search[value]'], list) else int(r.Query['search[value]'])
except: pass
r.Page = r.Start / r.Length
return view(r, m, v)
return decofunc
return wrapper
def __init__(self, url, *heads, **attrs):
TAG.__init__(self, 'TABLE', **TAG.ATTR(attrs, ID=TAG.UUID(), CLASS='table table-bordered table-hover', LIB='table_async', **{'width':'100%', 'url':url}))
tr = TR()
for head in heads: tr.html(TH().html(head))
self.html(THEAD().html(tr))
class ASYNCDATA(dict):
def __init__(self, draw, total, count):
dict.__init__(self, draw=draw, recordsTotal=total, recordsFiltered=count)
self.data = []
self['data'] = self.data
def Record(self, *cols, **attrs):
self.data.append([str(col) for col in cols])
return self
class FLIP(TAG):
def __init__(self, *heads, **attrs):
TAG.__init__(self, 'TABLE', **TAG.ATTR(attrs, ID=TAG.UUID(), CLASS='table', LIB='table_flip', **{'data-show-toggle':'true', 'data-paging':'true', 'width':'100%'}))
self.body = TBODY()
tr = TR()
for head in heads:
if '+' in head: tr.html(TH(**{'data-type':'html', 'data-breakpoints':'all', 'data-title':head.replace('+', '')}).html(head))
else: tr.html(TH(**{'data-type':'html'}).html(head))
self.html(THEAD().html(tr)).html(self.body)
def Record(self, *cols, **attrs):
tr = TR(**attrs)
for col in cols: tr.html(TD(**{'data-type':'html'}).html(col))
self.body.html(tr)
return self
def __len__(self, *args, **kwargs):
return self.body.__len__()
def __init__(self, **attrs): TAG.__init__(self, 'table', **attrs)
class UL(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'ul', **attrs)
class LI(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'li', **attrs)
class FORM(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'form', **attrs)
class INPUT(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'input', **attrs)
class SELECT(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'select', **attrs)
class OPTION(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'option', **attrs)
class BUTTON(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'button', **TAG.ATTR(attrs, CLASS='btn', TYPE='button'))
| apache-2.0 | -2,626,434,313,348,450,300 | -8,766,746,326,976,710,000 | 41.164 | 175 | 0.390881 | false |
chouseknecht/ansible | test/units/modules/network/f5/test_bigip_firewall_dos_profile.py | 22 | 3200 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_dos_profile import ModuleParameters
from library.modules.bigip_firewall_dos_profile import ModuleManager
from library.modules.bigip_firewall_dos_profile import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleManager
from ansible.modules.network.f5.bigip_firewall_dos_profile import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='my description',
threshold_sensitivity='low',
default_whitelist='whitelist1'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.description == 'my description'
assert p.threshold_sensitivity == 'low'
assert p.default_whitelist == '/Common/whitelist1'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='this is a description',
threshold_sensitivity='low',
default_whitelist='whitelist1',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'this is a description'
| gpl-3.0 | -8,452,585,535,360,728,000 | -2,657,702,810,103,891,000 | 28.090909 | 91 | 0.653438 | false |
dakrauth/picker | picker/forms.py | 1 | 6144 | from django import forms
from django.utils import timezone
from django.utils.module_loading import import_string
from . import models as picker
from . import utils
_picker_widget = None
encoded_game_key = 'game_{}'.format
TIE_KEY = '__TIE__'
def decoded_game_key(value):
return int(value.replace('game_', ''))
def encoded_game_item(game):
return (
encoded_game_key(game.id),
str(game.winner.id) if game.winner else (TIE_KEY if game.is_tie else '')
)
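# For example (ids are illustrative): encoded_game_key(7) -> 'game_7' and
# decoded_game_key('game_7') -> 7; encoded_game_item() pairs that key with
# the picked winner's id, the TIE_KEY sentinel, or '' when nothing is set.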
def get_picker_widget(league):
global _picker_widget
if not _picker_widget:
widget_path = league.config('TEAM_PICKER_WIDGET')
if widget_path:
_picker_widget = import_string(widget_path)
_picker_widget = _picker_widget or forms.RadioSelect
return _picker_widget
class GameField(forms.ChoiceField):
def __init__(self, game, manage=False, widget=None):
choices = [(str(game.away.id), game.away), (str(game.home.id), game.home)]
if manage:
choices.insert(1, (TIE_KEY, ''))
self.game = game
self.manage = manage
self.game_id = game.id
self.is_game = True
super(GameField, self).__init__(
choices=choices,
label=game.start_time.strftime('%a, %b %d %I:%M %p'),
required=False,
help_text=game.tv,
disabled=not self.manage and (self.game.start_time <= timezone.now()),
widget=widget or get_picker_widget(game.gameset.league)
)
class FieldIter:
def __init__(self, form):
self.fields = []
self.form = form
def append(self, name):
self.fields.append(name)
def __iter__(self):
for name in self.fields:
yield self.form[name]
class BasePickForm(forms.Form):
management = False
def __init__(self, gameset, *args, **kws):
super(BasePickForm, self).__init__(*args, **kws)
self.gameset = gameset
self.game_fields = FieldIter(self)
games = list(gameset.games.select_related('home__league', 'away__league'))
if games:
for gm in games:
key = encoded_game_key(gm.id)
self.fields[key] = GameField(gm, self.management)
self.game_fields.append(key)
self.fields['points'] = forms.IntegerField(
label='{}:'.format(games[-1].vs_description),
required=False
)
class ManagementPickForm(BasePickForm):
management = True
def __init__(self, gameset, *args, **kws):
kws.setdefault('initial', {}).update(**self.get_initial_picks(gameset))
super(ManagementPickForm, self).__init__(gameset, *args, **kws)
def save(self):
gameset = self.gameset
data = self.cleaned_data.copy()
gameset.points = data.pop('points', 0) or 0
gameset.save()
for key, winner in data.items():
if winner:
pk = decoded_game_key(key)
game = gameset.games.get(pk=pk)
game.winner = None if winner == TIE_KEY else int(winner)
gameset.update_pick_status()
@staticmethod
def get_initial_picks(gameset):
return dict({
encoded_game_key(game.id): str(game.winner.id)
for game in gameset.games.played()
if game.winner
}, points=gameset.points)
class UserPickForm(BasePickForm):
def __init__(self, user, gameset, *args, **kws):
initial = self.get_initial_user_picks(gameset, user)
kws.setdefault('initial', {}).update(initial)
self.user = user
super(UserPickForm, self).__init__(gameset, *args, **kws)
def save(self):
data = self.cleaned_data.copy()
picks = picker.PickSet.objects.for_gameset_user(self.gameset, self.user)
points = data.pop('points', None)
games = {decoded_game_key(k): v for k, v in data.items() if v}
picks.update_picks(games=games, points=points)
return picks
@staticmethod
def get_initial_user_picks(gameset, user):
ps = gameset.pick_for_user(user)
initial = dict({
encoded_game_key(g_id): str(w_id) for g_id, w_id in ps.gamepicks.picked_winner_ids()
}, points=ps.points) if ps else {}
return initial
class GameForm(forms.ModelForm):
class Meta:
model = picker.Game
fields = ('start_time', 'location')
class PreferenceForm(forms.ModelForm):
class Meta:
model = picker.Preference
fields = ('autopick',)
def __init__(self, instance, *args, **kws):
kws['instance'] = instance
self.current_email = instance.user.email.lower()
kws.setdefault('initial', {})['email'] = self.current_email
super(PreferenceForm, self).__init__(*args, **kws)
for league in picker.League.objects.all():
field_name = '{}_favorite'.format(league.slug)
current = None
if instance:
try:
current = picker.PickerFavorite.objects.get(user=instance.user, league=league)
except picker.PickerFavorite.DoesNotExist:
pass
self.fields[field_name] = forms.ModelChoiceField(
picker.Team.objects.filter(league=league),
label='{} Favorite'.format(league.abbr.upper()),
empty_label='-- Select --',
required=False,
initial=current.team if current else None
)
def save(self, commit=True):
super(PreferenceForm, self).save(commit)
if commit:
picker.PickerFavorite.objects.filter(user=self.instance.user).delete()
for key in self.cleaned_data:
if not key.endswith('_favorite'):
continue
slug = key.rsplit('_')[0]
league = picker.League.objects.get(slug=slug)
picker.PickerFavorite.objects.create(
league=league,
user=self.instance.user,
team=self.cleaned_data[key]
)
| mit | 1,825,824,733,661,966,000 | 5,327,758,824,906,018,000 | 30.187817 | 98 | 0.57487 | false |
MyRobotLab/pyrobotlab | home/pedrosenarego/zorba/gestures/addknowledge.py | 3 | 1333 | import os
import sys
import fileinput
import os.path
def addKnowledge(category,pattern):
#### change some things to make sense ############
pattern = pattern.replace('my', 'your')
#### Clean the ending </aiml>############
for line in fileinput.input('/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/aiml/aknowledge.aiml', inplace=1):
sys.stdout.write(line.replace('</aiml>', ''))
#######add the new sentence to aiml############
text_file = open("/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/aiml/aknowledge.aiml", "a")
TotalAmount = '<category><pattern>'+str(category)+'</pattern><template>'+str(category)+' '+str(pattern)+'</template></category>\n</aiml>'
text_file.write("%s" % TotalAmount)
text_file.close()
##### Clean if repeated in the set ############
#for line in fileinput.input('/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/sets/knowledge.txt', inplace=1):
#sys.stdout.write(line.replace(str(category), ''))
#######add the new sentence to knowledge.txt############
text_file = open("/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/sets/knowledge.txt", "a")
TotalAmount = str(category)
text_file.write("%s\n" % TotalAmount)
text_file.close() | apache-2.0 | -2,408,562,890,136,640,000 | -4,268,474,542,696,312,000 | 30.761905 | 139 | 0.657914 | false |
mims2707/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/atom/core.py | 80 | 20759 | #!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from xml.dom.minidom import parseString as xmlString
except ImportError:
xmlString = None
STRING_ENCODING = 'utf-8'
class XmlElement(object):
"""Represents an element node in an XML document.
The text member is a UTF-8 encoded str or unicode.
"""
_qname = None
_other_elements = None
_other_attributes = None
# The rule set contains mappings for XML qnames to child members and the
# appropriate member classes.
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
attribute with the qname 'att1' but version 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
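# Illustrative subclass (names are made up) showing the member conventions
# described in the docstring above:
#
#   class Person(XmlElement):
#     _qname = '{http://example.com/ns}person'
#     age = 'age'        # XML attribute
#     name = Name        # single child element
#     phones = [Phone]   # repeating child element
#
# where Name and Phone would themselves be XmlElement subclasses.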
def _get_rules(cls, version):
"""Initializes the _rule_set for the class which is used when parsing XML.
This method is used internally for parsing and generating XML for an
XmlElement. It is not recommended that you call this method directly.
Returns:
A tuple containing the XML parsing rules for the appropriate version.
The tuple looks like:
(qname, {sub_element_qname: (member_name, member_class, repeating), ..},
{attribute_qname: member_name})
To give a couple of concrete examples, the atom.data.Control _get_rules
with version of 2 will return:
('{http://www.w3.org/2007/app}control',
{'{http://www.w3.org/2007/app}draft': ('draft',
<class 'atom.data.Draft'>,
False)},
{})
Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
('{http://schemas.google.com/g/2005}feedLink',
{'{http://www.w3.org/2005/Atom}feed': ('feed',
<class 'gdata.data.GDFeed'>,
False)},
{'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
'rel': 'rel'})
"""
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
# runs a bit slower then hard coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
# This member points to a single occurrence element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
# FindExtensions and FindChildren are provided for backwards compatibility
# to the atom.AtomBase class.
# However, FindExtensions may return more results than the v1 atom.AtomBase
# method does, because get_elements searches both the expected children
# and the unexpected "other elements". The old AtomBase.FindExtensions
# method searched only "other elements" AKA extension_elements.
FindExtensions = get_elements
FindChildren = get_elements
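# Sketch of a lookup (tag and namespace are illustrative):
#
#   entry.get_elements('link', 'http://www.w3.org/2005/Atom')
#
# returns every matching child, whether it was parsed into a declared member
# or kept in _other_elements.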
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
if isinstance(attribute_def, (list, tuple)):
attribute_def = attribute_def[0]
member = getattr(self, attribute_def)
# TODO: ensure this hasn't broken existing behavior.
#member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1], version))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1], version))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement,
version))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
def _attach_members(self, tree, version=1, encoding=None):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
other_attributes and other_elements are also added a children
of this tree.
version: int Ignored in this method but used by VersionedElement.
encoding: str (optional)
"""
qname, elements, attributes = self.__class__._get_rules(version)
encoding = encoding or STRING_ENCODING
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
# I'm not sure if unicode can be used in the attribute name, so for now
# we assume the encoding is correct for the attribute name.
if not isinstance(value, unicode):
value = value.decode(encoding)
tree.attrib[key] = value
if self.text:
if isinstance(self.text, unicode):
tree.text = self.text
else:
tree.text = self.text.decode(encoding)
def to_string(self, version=1, encoding=None, pretty_print=None):
"""Converts this object to XML."""
tree_string = ElementTree.tostring(self._to_tree(version, encoding))
if pretty_print and xmlString is not None:
return xmlString(tree_string).toprettyxml()
return tree_string
ToString = to_string
def __str__(self):
return self.to_string()
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def __get_extension_elements(self):
return self._other_elements
def __set_extension_elements(self, elements):
self._other_elements = elements
extension_elements = property(__get_extension_elements,
__set_extension_elements,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def __get_extension_attributes(self):
return self._other_attributes
def __set_extension_attributes(self, attributes):
self._other_attributes = attributes
extension_attributes = property(__get_extension_attributes,
__set_extension_attributes,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
if qname:
return qname[qname.find('}')+1:]
return None
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if self._qname[0].startswith('{'):
self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
else:
self._qname[0] = tag
else:
if self._qname is not None and self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
tag = self._get_tag(1)
if tag is None:
tag = ''
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if namespace:
self._qname[0] = '{%s}%s' % (namespace, tag)
else:
self._qname[0] = tag
else:
if namespace:
self._qname = '{%s}%s' % (namespace, tag)
else:
self._qname = tag
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
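# Example of the version lookup above: with _qname = ('{ns1}entry', '{ns2}entry'),
# version 1 selects '{ns1}entry', version 2 selects '{ns2}entry', and any higher
# version falls back to the last entry; a plain string _qname is returned as-is.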
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
    tag: string The expected local tag, or None to match any tag.
    namespace: string The expected namespace, or None to match any namespace.
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
  # With no qname the member has neither a namespace nor a local tag; and if
  # neither a tag nor a namespace is expected, everything matches (first
  # condition of the return expression below).
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
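# A few concrete cases for the matcher above (illustrative only):
#   _qname_matches(None, None, '{http://a}b')      -> True  (nothing expected)
#   _qname_matches('b', None, '{http://a}b')       -> True  (tag matches, namespace not constrained)
#   _qname_matches('b', '', '{http://a}b')         -> False (empty string means "no namespace")
#   _qname_matches('b', 'http://a', '{http://a}b') -> True  (tag and namespace both match)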
def parse(xml_string, target_class=None, version=1, encoding=None):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass. If None is specified, the
XmlElement class is used.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
encoding: str (optional) The character encoding of the bytes in the
xml_string. Default is 'UTF-8'.
"""
if target_class is None:
target_class = XmlElement
if isinstance(xml_string, unicode):
if encoding is None:
xml_string = xml_string.encode(STRING_ENCODING)
else:
xml_string = xml_string.encode(encoding)
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == _get_qname(target_class, version):
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
| apache-2.0 | 5,996,143,796,608,141,000 | 5,064,608,371,999,062,000 | 36.743636 | 79 | 0.652873 | false |
tkem/mopidy-local-sqlite | mopidy_local_sqlite/library.py | 2 | 9430 | from __future__ import unicode_literals
import hashlib
import logging
import operator
import os
import os.path
import sqlite3
import sys
from mopidy import local
from mopidy.exceptions import ExtensionError
from mopidy.local import translator
from mopidy.models import Ref, SearchResult
import uritools
from . import Extension, schema
logger = logging.getLogger(__name__)
class SQLiteLibrary(local.Library):
name = 'sqlite'
def __init__(self, config):
self._config = ext_config = config[Extension.ext_name]
self._data_dir = Extension.get_or_create_data_dir(config)
try:
self._media_dir = config['local']['media_dir']
except KeyError:
raise ExtensionError('Mopidy-Local not enabled')
self._directories = []
for line in ext_config['directories']:
name, uri = line.rsplit(None, 1)
ref = Ref.directory(uri=uri, name=name)
self._directories.append(ref)
self._dbpath = os.path.join(self._data_dir, b'library.db')
self._connection = None
def load(self):
with self._connect() as connection:
version = schema.load(connection)
logger.debug('Using SQLite database schema v%s', version)
return schema.count_tracks(connection)
def lookup(self, uri):
if uri.startswith('local:album'):
return list(schema.lookup(self._connect(), Ref.ALBUM, uri))
elif uri.startswith('local:artist'):
return list(schema.lookup(self._connect(), Ref.ARTIST, uri))
elif uri.startswith('local:track'):
return list(schema.lookup(self._connect(), Ref.TRACK, uri))
else:
logger.error('Invalid lookup URI %s', uri)
return []
def browse(self, uri):
try:
if uri == self.ROOT_DIRECTORY_URI:
return self._directories
elif uri.startswith('local:directory'):
return self._browse_directory(uri)
elif uri.startswith('local:artist'):
return self._browse_artist(uri)
elif uri.startswith('local:album'):
return self._browse_album(uri)
else:
raise ValueError('Invalid browse URI')
except Exception as e:
logger.error('Error browsing %s: %s', uri, e)
return []
def search(self, query=None, limit=100, offset=0, uris=None, exact=False):
q = []
for field, values in (query.items() if query else []):
q.extend((field, value) for value in values)
filters = [f for uri in uris or [] for f in self._filters(uri) if f]
with self._connect() as c:
tracks = schema.search_tracks(c, q, limit, offset, exact, filters)
uri = uritools.uricompose('local', path='search', query=q)
return SearchResult(uri=uri, tracks=tracks)
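    # Illustrative note (not part of the original code): search() above turns a
    # Mopidy query such as {'artist': ['Foo']} into q = [('artist', 'Foo')] and
    # builds a result URI along the lines of 'local:search?artist=Foo'; the
    # exact encoding is left to uritools.uricompose.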
def get_distinct(self, field, query=None):
q = []
for key, values in (query.items() if query else []):
q.extend((key, value) for value in values)
return set(schema.list_distinct(self._connect(), field, q))
def begin(self):
return schema.tracks(self._connect())
def add(self, track):
try:
track = self._validate_track(track)
schema.insert_track(self._connect(), track)
except Exception as e:
logger.warn('Skipped %s: %s', track.uri, e)
def remove(self, uri):
schema.delete_track(self._connect(), uri)
def flush(self):
if not self._connection:
return False
self._connection.commit()
return True
def close(self):
schema.cleanup(self._connection)
self._connection.commit()
self._connection.close()
self._connection = None
def clear(self):
try:
schema.clear(self._connect())
return True
except sqlite3.Error as e:
logger.error('Error clearing SQLite database: %s', e)
return False
def _connect(self):
if not self._connection:
self._connection = sqlite3.connect(
self._dbpath,
factory=schema.Connection,
timeout=self._config['timeout'],
check_same_thread=False,
)
return self._connection
def _browse_album(self, uri, order=('disc_no', 'track_no', 'name')):
return schema.browse(self._connect(), Ref.TRACK, order, album=uri)
def _browse_artist(self, uri, order=('type', 'name COLLATE NOCASE')):
with self._connect() as c:
albums = schema.browse(c, Ref.ALBUM, order, albumartist=uri)
refs = schema.browse(c, order=order, artist=uri)
album_uris, tracks = {ref.uri for ref in albums}, []
for ref in refs:
if ref.type == Ref.ALBUM and ref.uri not in album_uris:
albums.append(Ref.directory(
uri=uritools.uricompose('local', None, 'directory', dict(
type=Ref.TRACK, album=ref.uri, artist=uri
)),
name=ref.name
))
elif ref.type == Ref.TRACK:
tracks.append(ref)
else:
logger.debug('Skipped SQLite browse result %s', ref.uri)
albums.sort(key=operator.attrgetter('name'))
return albums + tracks
def _browse_directory(self, uri, order=('type', 'name COLLATE NOCASE')):
query = dict(uritools.urisplit(uri).getquerylist())
type = query.pop('type', None)
role = query.pop('role', None)
# TODO: handle these in schema (generically)?
if type == 'date':
format = query.get('format', '%Y-%m-%d')
return map(_dateref, schema.dates(self._connect(), format=format))
if type == 'genre':
return map(_genreref, schema.list_distinct(self._connect(), 'genre')) # noqa
# Fix #38: keep sort order of album tracks; this also applies
# to composers and performers
if type == Ref.TRACK and 'album' in query:
order = ('disc_no', 'track_no', 'name')
if type == Ref.ARTIST and self._config['use_artist_sortname']:
order = ('coalesce(sortname, name) COLLATE NOCASE',)
roles = role or ('artist', 'albumartist') # FIXME: re-think 'roles'...
refs = []
for ref in schema.browse(self._connect(), type, order, role=roles, **query): # noqa
if ref.type == Ref.TRACK or (not query and not role):
refs.append(ref)
elif ref.type == Ref.ALBUM:
refs.append(Ref.directory(uri=uritools.uricompose(
'local', None, 'directory', dict(query, type=Ref.TRACK, album=ref.uri) # noqa
), name=ref.name))
elif ref.type == Ref.ARTIST:
refs.append(Ref.directory(uri=uritools.uricompose(
'local', None, 'directory', dict(query, **{role: ref.uri})
), name=ref.name))
else:
logger.warn('Unexpected SQLite browse result: %r', ref)
return refs
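    # For reference (sketch): a directory URI built above looks roughly like
    # 'local:directory?type=track&album=<album uri>', with percent-encoding
    # handled by uritools.uricompose; _filters() below unpacks the same query
    # part again when such a URI is passed to search().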
def _validate_artist(self, artist):
if not artist.name:
raise ValueError('Empty artist name')
uri = artist.uri or self._model_uri('artist', artist)
return artist.copy(uri=uri)
def _validate_album(self, album):
if not album.name:
raise ValueError('Empty album name')
uri = album.uri or self._model_uri('album', album)
artists = map(self._validate_artist, album.artists)
return album.copy(uri=uri, artists=artists)
def _validate_track(self, track, encoding=sys.getfilesystemencoding()):
if not track.uri:
raise ValueError('Empty track URI')
if track.name:
name = track.name
else:
path = translator.local_track_uri_to_path(track.uri, b'')
name = os.path.basename(path).decode(encoding, errors='replace')
if track.album and track.album.name:
album = self._validate_album(track.album)
else:
album = None
return track.copy(
name=name,
album=album,
artists=map(self._validate_artist, track.artists),
composers=map(self._validate_artist, track.composers),
performers=map(self._validate_artist, track.performers)
)
def _filters(self, uri):
if uri.startswith('local:directory'):
return [dict(uritools.urisplit(uri).getquerylist())]
elif uri.startswith('local:artist'):
return [{'artist': uri}, {'albumartist': uri}]
elif uri.startswith('local:album'):
return [{'album': uri}]
else:
return []
def _model_uri(self, type, model):
if model.musicbrainz_id and self._config['use_%s_mbid_uri' % type]:
return 'local:%s:mbid:%s' % (type, model.musicbrainz_id)
digest = hashlib.md5(str(model)).hexdigest()
return 'local:%s:md5:%s' % (type, digest)
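    # Example URIs generated above (illustrative): when a model carries a
    # MusicBrainz ID and the matching option is enabled, an artist becomes
    # 'local:artist:mbid:<musicbrainz id>'; otherwise the fallback has the form
    # 'local:album:md5:<hex digest of str(model)>'.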
def _dateref(date):
return Ref.directory(
uri=uritools.uricompose('local', None, 'directory', {'date': date}),
name=date
)
def _genreref(genre):
return Ref.directory(
uri=uritools.uricompose('local', None, 'directory', {'genre': genre}),
name=genre
)
| apache-2.0 | -1,907,408,456,713,896,200 | -7,132,572,104,325,740,000 | 36.125984 | 98 | 0.573701 | false |
ynkjm/ryu | ryu/contrib/ncclient/transport/errors.py | 77 | 1293 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ncclient import NCClientError
class TransportError(NCClientError):
pass
class AuthenticationError(TransportError):
pass
class SessionCloseError(TransportError):
def __init__(self, in_buf, out_buf=None):
msg = 'Unexpected session close'
if in_buf:
msg += '\nIN_BUFFER: `%s`' % in_buf
if out_buf:
msg += ' OUT_BUFFER: `%s`' % out_buf
        TransportError.__init__(self, msg)
class SSHError(TransportError):
pass
class SSHUnknownHostError(SSHError):
def __init__(self, host, fingerprint):
SSHError.__init__(self, 'Unknown host key [%s] for [%s]' % (fingerprint, host))
self.host = host
self.fingerprint = fingerprint
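# Usage sketch (not part of the original module): callers typically catch the
# broad TransportError and branch on the specific subclass, e.g.
#   try:
#       ...  # open the SSH session
#   except SSHUnknownHostError as e:
#       print(e.host, e.fingerprint)
#   except TransportError:
#       ...  # any other transport-level failure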
| apache-2.0 | 1,647,183,705,078,776,800 | -2,915,799,838,956,216,300 | 30.536585 | 87 | 0.686002 | false |
amerlyq/piony | piony/config/argparser.py | 1 | 2747 | from argparse import ArgumentParser, RawDescriptionHelpFormatter
import piony
from piony.common.exceptions import InputError
class ArgParser(object):
def __init__(self):
self.ps = ArgumentParser(prog=piony.__appname__,
formatter_class=RawDescriptionHelpFormatter,
description=piony.__doc__, epilog="Enjoy!!!")
self._setup_options()
def parse(self, argv):
if not argv:
argv = []
elif isinstance(argv, str):
argv = argv.split()
elif not isinstance(argv, list):
raise InputError("Wrong argv type: {}".format(type(argv)))
return self.ps.parse_args(argv)
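    # Illustrative inputs for parse() above (sketch): parse(None) and parse([])
    # both parse an empty command line, parse('-k -V v') is equivalent to
    # parse(['-k', '-V', 'v']), and any other argv type raises InputError.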
def apply(self, args):
from operator import xor
res = (False, False)
dbg = {'a': (True, True), 'v': (True, False), 'k': (False, True)}
if args.verbose:
for entry in args.verbose:
res = map(xor, res, dbg[entry])
piony.G_DEBUG_VISUALS, piony.G_DEBUG_ACTIONS = res
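    # The xor-folding in apply() toggles the (visuals, actions) debug pair per
    # -V flag, e.g. ['v'] -> (True, False), ['v', 'k'] -> (True, True), and
    # ['a', 'v'] -> (False, True): 'a' switches both on, a second flag flips
    # one of them back off.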
def _setup_options(self):
## Configuration
farg = self.ps.add_argument
farg('buds', metavar='bud', nargs='*', type=str, default=None,
help="Setup profile layout in json directly on cmdline. "
"Can be specified several times -- one for each slice. "
"Or use pathes to files with slices inside.")
farg('-v', '--version', action='version', default=None,
version="%(prog)s {0}".format(piony.__version__),
help="Version of program.")
gr_window = self.ps.add_argument_group('Window')
warg = gr_window.add_argument
warg('-c', '--config', default=None,
help="Config file with default settings.")
warg('-p', '--print', default=None,
help="Toggle action print/execute to use as frontend only.")
## Appearance
warg('-s', '--size', type=int, default=None,
help="Sets window size WxH=NxN to derive all rings sizes from it.")
warg('-F', '--fullscreen', action='store_true', default=None,
help="Overlay fullscreen/local")
warg('-T', '--no-tooltip', action='store_true', default=None,
help="Disable pop-up items, for those who is irritated.")
## Process
gr_general = self.ps.add_argument_group('General')
garg = gr_general.add_argument
garg('-k', '--kill', action='store_true', default=None,
help="Kill running daemonized program.")
garg('-V', '--verbose', nargs='?', type=str,
const='a', choices=['a', 'v', 'k'], default=None,
help="Verbose (debug): [a]ll (default), [v]isuals, [k]eys.")
| gpl-3.0 | 1,459,265,360,146,880,300 | 6,385,395,213,095,301,000 | 41.261538 | 80 | 0.560612 | false |
gautam1858/tensorflow | tensorflow/python/training/adagrad_test.py | 22 | 15078 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self,
use_locking=False,
use_resource=False,
use_callable_params=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Validate updated params
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), v0_val)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), v1_val)
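  # Sanity check for the expected values above (standard Adagrad update,
  # written out by hand): accum += grad**2; var -= lr * grad / sqrt(accum).
  # For var0[0] with lr=3.0, grad=0.1 and initial accumulator 0.1:
  #   step 1: accum=0.11, var = 1.0 - 0.3/sqrt(0.11) ~  0.0955
  #   step 2: accum=0.12, var ~  0.0955 - 0.3/sqrt(0.12) ~ -0.7706
  #   step 3: accum=0.13, var ~ -0.7706 - 0.3/sqrt(0.13) ~ -1.6026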
def testBasic(self):
self.doTestBasic(use_locking=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(
use_locking=False, use_resource=True, use_callable_params=True)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1], [3, 4]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], self.evaluate(var0))
self.assertAllClose([[3.0], [4.0]], self.evaluate(var1))
# Run 3 step of sgd
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
@test_util.run_deprecated_v1
def testSparseRepeatedIndicesResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = adagrad.AdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
@test_util.run_deprecated_v1
def testSparseStability(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype),
constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), self.evaluate(var0))
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testDynamicShapeVariable_Ok(self):
with self.cached_session():
v = variable_scope.get_variable("v", initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(v.shape.is_fully_defined())
# Creating optimizer should cause no exception.
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
@test_util.run_v1_only("b/120545219")
def testDynamicShapeVariableWithCallableInit(self):
var0 = variable_scope.get_variable("var0",
initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(var0.shape.is_fully_defined())
grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
learning_rate = lambda: 3.0
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=True)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0], [var0]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val = self.evaluate([var0])
self.assertAllClose([1.0], v0_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0], [var0]))
# Validate updated params
v0_val = self.evaluate([var0])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932]), v0_val)
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,699,127,804,647,341,000 | -2,373,443,505,609,067,500 | 41.59322 | 80 | 0.620573 | false |
Stan1989/volatility | volatility/plugins/gui/vtypes/xp.py | 50 | 16283 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
import volatility.plugins.gui.constants as consts
class XP2003x86BaseVTypes(obj.ProfileModification):
"""Applies to everything x86 before Windows 7"""
def check(self, profile):
m = profile.metadata
version = (m.get('major', 0), m.get('minor', 0))
return (m.get('os', None) == 'windows' and
version < (6, 1) and
m.get('memory_model', '32bit') == '32bit')
def modification(self, profile):
profile.vtypes.update({
'tagWINDOWSTATION' : [ 0x5C, {
'dwSessionId' : [ 0x0, ['unsigned long']],
'rpwinstaNext' : [ 0x4, ['pointer', ['tagWINDOWSTATION']]],
'rpdeskList' : [ 0x8, ['pointer', ['tagDESKTOP']]],
'dwWSF_Flags' : [ 0x10, ['unsigned long']],
'ptiDrawingClipboard' : [ 0x1C, ['pointer', ['tagTHREADINFO']]],
'spwndClipOpen' : [ 0x20, ['pointer', ['tagWND']]],
'spwndClipViewer' : [ 0x24, ['pointer', ['tagWND']]],
'spwndClipOwner' : [ 0x28, ['pointer', ['tagWND']]],
'pClipBase' : [ 0x2C, ['pointer', ['array', lambda x : x.cNumClipFormats, ['tagCLIP']]]],
'cNumClipFormats' : [ 0x30, ['unsigned int']],
'iClipSerialNumber' : [ 0x34, ['unsigned int']],
'iClipSequenceNumber' : [ 0x38, ['unsigned int']],
#'spwndClipboardListener' : [ 0x3C, ['pointer', ['tagWND']]],
'pGlobalAtomTable' : [ 0x40, ['pointer', ['void']]],
}],
## This is defined in Windows 7
'tagCLIP' : [ 12, {
'fmt' : [ 0, ['Enumeration', dict(target = 'unsigned long', choices = consts.CLIPBOARD_FORMAT_ENUM)]],
'hData' : [ 4, ['unsigned int']],
'fGlobalHandle' : [ 8, ['unsigned int']],
}],
'tagDESKTOP' : [ 0x84, {
'dwSessionId' : [ 0x0, ['unsigned long']],
'pDeskInfo' : [ 0x4, ['pointer', ['tagDESKTOPINFO']]],
'rpdeskNext' : [ 0xc, ['pointer', ['tagDESKTOP']]],
'rpwinstaParent' : [ 0x10, ['pointer', ['tagWINDOWSTATION']]],
'hsectionDesktop' : [ 0x40, ['pointer', ['void']]],
'pheapDesktop' : [ 0x44, ['pointer', ['tagWIN32HEAP']]],
'PtiList' : [ 0x64, ['_LIST_ENTRY']],
}],
'tagTHREADINFO' : [ None, { # Same as Win32Thread
'pEThread' : [ 0x00, ['pointer', ['_ETHREAD']]],
'ppi' : [ 0x2C, ['pointer', ['tagPROCESSINFO']]],
'pq' : [ 0x30, ['pointer', ['tagQ']]],
'pDeskInfo' : [ 0x40, ['pointer', ['tagDESKTOPINFO']]],
'PtiLink' : [ 0xAC, ['_LIST_ENTRY']],
'fsHooks' : [ 0x98, ['unsigned long']],
'aphkStart' : [ 0xF4, ['array', 16, ['pointer', ['tagHOOK']]]],
}],
'tagQ' : [ None, {
'mlInput' : [ 0x00, ['tagMLIST']],
}],
'tagMLIST' : [ None, {
'pqmsgRead' : [ 0x00, ['pointer', ['tagQMSG']]],
'cMsgs' : [ 0x08, ['unsigned long']],
}],
'tagQMSG' : [ None, {
'pqmsgNext' : [ 0x00, ['pointer', ['tagQMSG']]],
'pqmsgPrev' : [ 0x04, ['pointer', ['tagQMSG']]],
'msg' : [ 0x08, ['tagMSG']],
}],
'tagMSG' : [ None, {
'hwnd' : [ 0x00, ['unsigned long']],
'message' : [ 0x04, ['unsigned long']],
'wParam' : [ 0x08, ['unsigned long']],
'lParam' : [ 0x0C, ['unsigned long']],
'time' : [ 0x10, ['unsigned long']],
'pt' : [ 0x14, ['tagPOINT']],
}],
'tagPOINT' : [ None, {
'x' : [ 0x00, ['long']],
'y' : [ 0x04, ['long']],
}],
'tagHOOK' : [ None, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'phkNext' : [ 0x14, ['pointer', ['tagHOOK']]],
'iHook' : [ 0x18, ['long']],
'offPfn' : [ 0x1c, ['unsigned long']],
'flags': [ 0x20, ['Flags', {'bitmap': consts.HOOK_FLAGS}]],
'ihmod' : [ 0x24, ['long']],
'ptiHooked' : [ 0x28, ['pointer', ['tagTHREADINFO']]],
'rpdesk' : [ 0x2c, ['pointer', ['tagDESKTOP']]],
}],
'tagDESKTOPINFO' : [ None, {
'pvDesktopBase' : [ 0x0, ['pointer', ['void']]],
'pvDesktopLimit' : [ 0x4, ['pointer', ['void']]],
'spwnd' : [ 0x08, ['pointer', ['tagWND']]],
'fsHooks' : [ 0x0c, ['unsigned long']],
'aphkStart' : [ 0x10, ['array', 16, ['pointer', ['tagHOOK']]]],
}],
'tagSERVERINFO' : [ 0xffc, {
'cHandleEntries' : [ 8, ['unsigned long']],
'cbHandleTable' : [ 0x1bc, ['unsigned long']],
}],
'tagSHAREDINFO' : [ 0x11c, { # From Win7SP0x86
'psi' : [ 0x0, ['pointer', ['tagSERVERINFO']]],
'aheList' : [ 0x4, ['pointer', ['_HANDLEENTRY']]],
'ulSharedDelta' : [ 0xC, ['unsigned long']],
}],
'_HANDLEENTRY' : [ 0xc, { # From Win7SP0x86
'phead' : [ 0x0, ['pointer', ['_HEAD']]],
'pOwner' : [ 0x4, ['pointer', ['void']]],
'bType': [ 8, ['Enumeration', dict(target = 'unsigned char', choices = consts.HANDLE_TYPE_ENUM)]],
'bFlags' : [ 0x9, ['unsigned char']],
'wUniq' : [ 0xa, ['unsigned short']],
}],
'_HEAD' : [ 0x8, { # From Win7SP0x86
'h' : [ 0x0, ['pointer', ['void']]],
'cLockObj' : [ 0x4, ['unsigned long']],
}],
'tagPROCESSINFO' : [ None, {
'Process' : [ 0x0, ['pointer', ['_EPROCESS']]],
}],
'_THRDESKHEAD' : [ 0x14, {
'h' : [ 0x0, ['pointer', ['void']]],
'cLockObj' : [ 0x4, ['unsigned long']],
'pti' : [ 0x8, ['pointer', ['tagTHREADINFO']]],
'rpdesk' : [ 0xc, ['pointer', ['tagDESKTOP']]],
'pSelf' : [ 0x10, ['pointer', ['unsigned char']]],
}],
'tagCLS' : [ 0x5c, {
'pclsNext' : [ 0x0, ['pointer', ['tagCLS']]],
'atomClassName' : [ 0x4, ['unsigned short']],
'atomNVClassName' : [ 0x6, ['unsigned short']],
}],
'tagRECT' : [ 0x10, {
'left' : [ 0x0, ['long']],
'top' : [ 0x4, ['long']],
'right' : [ 0x8, ['long']],
'bottom' : [ 0xc, ['long']],
}],
'tagWND' : [ 0x90, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'ExStyle' : [ 0x1c, ['unsigned long']],
'style' : [ 0x20, ['unsigned long']],
'hModule' : [ 0x24, ['pointer', ['void']]],
'spwndNext' : [ 0x2c, ['pointer', ['tagWND']]],
'spwndPrev' : [ 0x30, ['pointer', ['tagWND']]],
'spwndParent' : [ 0x34, ['pointer', ['tagWND']]],
'spwndChild' : [ 0x38, ['pointer', ['tagWND']]],
'spwndOwner' : [ 0x3c, ['pointer', ['tagWND']]],
'rcWindow' : [ 0x40, ['tagRECT']],
'rcClient' : [ 0x50, ['tagRECT']],
'lpfnWndProc' : [ 0x60, ['pointer', ['void']]],
'pcls' : [ 0x64, ['pointer', ['tagCLS']]],
'strName' : [ 0x80, ['_LARGE_UNICODE_STRING']],
'cbwndExtra' : [ 0x8C, ['long']],
'dwUserData' : [ 0x98, ['unsigned long']],
}],
'_LARGE_UNICODE_STRING' : [ 0xc, {
'Length' : [ 0x0, ['unsigned long']],
'MaximumLength' : [ 0x4, ['BitField', dict(start_bit = 0, end_bit = 31)]],
'bAnsi' : [ 0x4, ['BitField', dict(start_bit = 31, end_bit = 32)]],
'Buffer' : [ 0x8, ['pointer', ['unsigned short']]],
}],
})
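    # For readers new to the layout above: each entry follows the Volatility
    # vtype convention [structure_size, {member: [offset, [type, args]]}], so
    # 'tagCLIP' describes a 12-byte structure whose 'fmt' member sits at offset
    # 0 and is rendered through the CLIPBOARD_FORMAT_ENUM enumeration.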
class XP2003x64BaseVTypes(obj.ProfileModification):
"""Applies to Windows XP and 2003 x64"""
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '64bit',
'major': lambda x: x < 6}
def modification(self, profile):
profile.vtypes.update({
'tagWINDOWSTATION' : [ 0x90, { # !poolfind Wind is 100h
'dwSessionId' : [ 0x0, ['unsigned long']],
'rpwinstaNext' : [ 0x8, ['pointer64', ['tagWINDOWSTATION']]], # FreeWindowStation
'rpdeskList' : [ 0x10, ['pointer64', ['tagDESKTOP']]],
'dwWSF_Flags' : [ 0x20, ['unsigned long']], # FreeWindowStation
'ptiDrawingClipboard' : [ 0x38, ['pointer64', ['tagTHREADINFO']]], # xxxDrawClipboard
'spwndClipOpen' : [ 0x40, ['pointer64', ['tagWND']]],
'spwndClipViewer' : [ 0x48, ['pointer64', ['tagWND']]],
'spwndClipOwner' : [ 0x50, ['pointer64', ['tagWND']]],
'pClipBase' : [ 0x58, ['pointer64', ['array', lambda x : x.cNumClipFormats, ['tagCLIP']]]], # InternalSetClipboardData
'cNumClipFormats' : [ 0x60, ['unsigned int']], # InternalSetClipboardData
'iClipSerialNumber' : [ 0x64, ['unsigned int']], # InternalSetClipboardData
'iClipSequenceNumber' : [ 0x68, ['unsigned int']], # InternalSetClipboardData
'pGlobalAtomTable' : [ 0x70, ['pointer64', ['void']]],
}],
# From Windows 7
'tagCLIP' : [ 0x18, {
'fmt' : [ 0x0, ['Enumeration', dict(target = 'unsigned long', choices = consts.CLIPBOARD_FORMAT_ENUM)]],
'hData' : [ 0x8, ['pointer64', ['void']]],
'fGlobalHandle' : [ 0x10, ['long']],
}],
'tagDESKTOP' : [ 0xd0, { # !poolfind Desk is 140h
'dwSessionId' : [ 0x0, ['unsigned long']],
'pDeskInfo' : [ 0x8, ['pointer64', ['tagDESKTOPINFO']]], # xxxCreateDesktop
'rpdeskNext' : [ 0x18, ['pointer64', ['tagDESKTOP']]], # ParseDesktop
'rpwinstaParent' : [ 0x20, ['pointer64', ['tagWINDOWSTATION']]],
'hsectionDesktop' : [ 0x70, ['pointer64', ['void']]], # MapDesktop
'pheapDesktop' : [ 0x78, ['pointer64', ['tagWIN32HEAP']]], # DesktopAlloc
'PtiList' : [ 0xa0, ['_LIST_ENTRY']], # zzzJournalAttach
}],
'tagTHREADINFO' : [ None, {
'pEThread' : [ 0x00, ['pointer', ['_ETHREAD']]],
'ppi' : [ 0x68, ['pointer64', ['tagPROCESSINFO']]], # xxxSetThreadDesktop
#'pq' : [ 0x30, ['pointer', ['tagQ']]],
'pDeskInfo' : [ 0x90, ['pointer64', ['tagDESKTOPINFO']]], # xxxDesktopThread
'PtiLink' : [ 0x160, ['_LIST_ENTRY']],
'fsHooks' : [ 0x138, ['unsigned long']], # xxxSetThreadDesktop, CheckWHFBits
'aphkStart' : [ 0x140, ['array', 16, ['pointer64', ['tagHOOK']]]],
}],
'tagDESKTOPINFO' : [ None, {
'pvDesktopBase' : [ 0x0, ['pointer64', ['void']]],
'pvDesktopLimit' : [ 0x8, ['pointer64', ['void']]],
'spwnd' : [ 0x10, ['pointer64', ['tagWND']]],
'fsHooks' : [ 0x18, ['unsigned long']], # CheckWHFBits
'aphkStart' : [ 0x20, ['array', 16, ['pointer64', ['tagHOOK']]]],
}],
'tagWND' : [ None, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'ExStyle' : [ 0x30, ['unsigned long']], # xxxCreateWindowEx
'style' : [ 0x34, ['unsigned long']], # xxxCreateWindowEx
'spwndNext' : [ 0x48, ['pointer64', ['tagWND']]],
'spwndPrev' : [ 0x50, ['pointer64', ['tagWND']]],
'spwndParent' : [ 0x58, ['pointer64', ['tagWND']]],
'spwndChild' : [ 0x60, ['pointer64', ['tagWND']]],
'spwndOwner' : [ 0x68, ['pointer64', ['tagWND']]],
'rcWindow' : [ 0x70, ['tagRECT']],
'rcClient' : [ 0x80, ['tagRECT']],
'lpfnWndProc' : [ 0x90, ['pointer64', ['void']]],
'pcls' : [ 0x98, ['pointer64', ['tagCLS']]], # HMChangeOwnerThread
'strName' : [ 0xd0, ['_LARGE_UNICODE_STRING']],
}],
'tagRECT' : [ 0x10, {
'left' : [ 0x0, ['long']],
'top' : [ 0x4, ['long']],
'right' : [ 0x8, ['long']],
'bottom' : [ 0xc, ['long']],
}],
'tagCLS' : [ None, {
'pclsNext' : [ 0x0, ['pointer64', ['tagCLS']]],
'atomClassName' : [ 0x8, ['unsigned short']], # HMChangeOwnerThread
'atomNVClassName' : [ 0xA, ['unsigned short']],
}],
# From Win7 x64
'_LARGE_UNICODE_STRING' : [ 0x10, {
'Length' : [ 0x0, ['unsigned long']],
'MaximumLength' : [ 0x4, ['BitField', dict(start_bit = 0, end_bit = 31, native_type = 'unsigned long')]],
'bAnsi' : [ 0x4, ['BitField', dict(start_bit = 31, end_bit = 32, native_type = 'unsigned long')]],
'Buffer' : [ 0x8, ['pointer64', ['unsigned short']]],
}],
# From Win7 x64
'_THRDESKHEAD' : [ 0x28, {
'h' : [ 0x0, ['pointer64', ['void']]],
'cLockObj' : [ 0x8, ['unsigned long']],
'pti' : [ 0x10, ['pointer64', ['tagTHREADINFO']]],
'rpdesk' : [ 0x18, ['pointer64', ['tagDESKTOP']]],
'pSelf' : [ 0x20, ['pointer64', ['unsigned char']]],
}],
# From Win7 x64
'tagSHAREDINFO' : [ None, {
'psi' : [ 0x0, ['pointer64', ['tagSERVERINFO']]],
'aheList' : [ 0x8, ['pointer64', ['_HANDLEENTRY']]],
#'HeEntrySize' : [ 0x10, ['unsigned long']],
#'pDispInfo' : [ 0x18, ['pointer64', ['tagDISPLAYINFO']]],
'ulSharedDelta' : [ 0x18, ['unsigned long long']],
#'awmControl' : [ 0x28, ['array', 31, ['_WNDMSG']]],
#'DefWindowMsgs' : [ 0x218, ['_WNDMSG']],
#'DefWindowSpecMsgs' : [ 0x228, ['_WNDMSG']],
}],
# From Win7 x64
'_HANDLEENTRY' : [ 0x18, {
'phead' : [ 0x0, ['pointer64', ['_HEAD']]],
'pOwner' : [ 0x8, ['pointer64', ['void']]],
'bType': [ 0x10, ['Enumeration', dict(target = 'unsigned char', choices = consts.HANDLE_TYPE_ENUM)]],
'bFlags' : [ 0x11, ['unsigned char']],
'wUniq' : [ 0x12, ['unsigned short']],
}],
# From Win7 x64
'_HEAD' : [ 0x10, {
'h' : [ 0x0, ['pointer64', ['void']]],
'cLockObj' : [ 0x8, ['unsigned long']],
}],
'tagSERVERINFO' : [ None, {
'cHandleEntries' : [ 8, ['unsigned long']],
'cbHandleTable' : [ 0x330, ['unsigned long']], # HMInitHandleTable
}],
'tagPROCESSINFO' : [ None, {
'Process' : [ 0x0, ['pointer', ['_EPROCESS']]],
}],
# From Win7 x64
'tagHOOK' : [ 0x60, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'phkNext' : [ 0x28, ['pointer64', ['tagHOOK']]],
'iHook' : [ 0x30, ['long']],
'offPfn' : [ 0x38, ['unsigned long long']],
'flags': [ 0x40, ['Flags', {'bitmap': consts.HOOK_FLAGS}]],
'ihmod' : [ 0x44, ['long']],
'ptiHooked' : [ 0x48, ['pointer64', ['tagTHREADINFO']]],
'rpdesk' : [ 0x50, ['pointer64', ['tagDESKTOP']]],
'nTimeout' : [ 0x58, ['BitField', dict(start_bit = 0, end_bit = 7, native_type = 'unsigned long')]],
'fLastHookHung' : [ 0x58, ['BitField', dict(start_bit = 7, end_bit = 8, native_type = 'long')]],
}],
})
| gpl-2.0 | 4,731,525,751,529,558,000 | 1,327,924,631,484,909,800 | 46.334302 | 130 | 0.467359 | false |
strahlc/exaile | xlgui/main.py | 1 | 43837 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import datetime
import logging
import os
import re
import threading
import cairo
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from xl.nls import gettext as _
from xl import (
common,
covers,
event,
formatter,
player,
playlist,
providers,
settings,
trax,
xdg
)
from xlgui.accelerators import AcceleratorManager
from xlgui.playlist_container import PlaylistContainer
from xlgui.widgets import (
dialogs,
info,
menu,
playback
)
from xlgui.widgets.playlist import (
PlaylistPage,
PlaylistView
)
from xlgui import (
guiutil,
tray,
menu as mainmenu
)
logger = logging.getLogger(__name__)
# Length of playback step when user presses seek key (sec)
SEEK_STEP_DEFAULT = 10
# Length of volume steps when user presses up/down key
VOLUME_STEP_DEFAULT = 0.1
class MainWindow(GObject.GObject):
"""
Main Exaile Window
"""
__gproperties__ = {
'is-fullscreen': (bool, 'Fullscreen',
'Whether the window is fullscreen.',
False, # Default
GObject.PARAM_READWRITE),
}
__gsignals__ = {'main-visible-toggle': (GObject.SignalFlags.RUN_LAST, bool, ())}
_mainwindow = None
def __init__(self, controller, builder, collection):
"""
Initializes the main window
@param controller: the main gui controller
"""
GObject.GObject.__init__(self)
self.controller = controller
self.collection = collection
self.playlist_manager = controller.exaile.playlists
self.current_page = -1
self._fullscreen = False
self.resuming = False
self.window_state = 0
self.minimized = False
self.builder = builder
self.window = self.builder.get_object('ExaileWindow')
self.window.set_title('Exaile')
self.title_formatter = formatter.TrackFormatter(settings.get_option(
'gui/main_window_title_format', _('$title (by $artist)') +
' - Exaile'))
self.accelgroup = Gtk.AccelGroup()
self.window.add_accel_group(self.accelgroup)
self.accel_manager = AcceleratorManager('mainwindow-accelerators', self.accelgroup)
self.menubar = self.builder.get_object("mainmenu")
fileitem = self.builder.get_object("file_menu_item")
filemenu = menu.ProviderMenu('menubar-file-menu', self)
fileitem.set_submenu(filemenu)
edititem = self.builder.get_object("edit_menu_item")
editmenu = menu.ProviderMenu('menubar-edit-menu', self)
edititem.set_submenu(editmenu)
viewitem = self.builder.get_object("view_menu_item")
viewmenu = menu.ProviderMenu('menubar-view-menu', self)
viewitem.set_submenu(viewmenu)
toolsitem = self.builder.get_object("tools_menu_item")
toolsmenu = menu.ProviderMenu('menubar-tools-menu', self)
toolsitem.set_submenu(toolsmenu)
helpitem = self.builder.get_object("help_menu_item")
helpmenu = menu.ProviderMenu('menubar-help-menu', self)
helpitem.set_submenu(helpmenu)
self._setup_widgets()
self._setup_position()
self._setup_hotkeys()
logger.info("Connecting main window events...")
self._connect_events()
MainWindow._mainwindow = self
mainmenu._create_menus()
def _setup_hotkeys(self):
"""
Sets up accelerators that haven't been set up in UI designer
"""
hotkeys = (
('<Control>S', lambda *e: self.on_save_playlist()),
('<Shift><Control>S', lambda *e: self.on_save_playlist_as()),
('<Control>F', lambda *e: self.on_panel_filter_focus()),
('<Control>G', lambda *e: self.on_search_playlist_focus()), # FIXME
('<Control><Alt>l', lambda *e: player.QUEUE.clear()), # FIXME
('<Control>P', self._on_playpause_button),
('<Control>Right', lambda *e: self._on_seek_key(True)),
('<Control>Left', lambda *e: self._on_seek_key(False)),
('<Control>plus', lambda *e: self._on_volume_key(True)),
('<Control>minus', lambda *e: self._on_volume_key(False)),
('<Control>Page_Up', self._on_prev_tab_key),
('<Control>Page_Down', self._on_next_tab_key),
('<Alt>N', self._on_focus_playlist_container),
# These 4 are subject to change.. probably should do this
# via a different mechanism too...
('<Alt>I', lambda *e: self.controller.focus_panel('files')),
#('<Alt>C', lambda *e: self.controller.focus_panel('collection')),
('<Alt>R', lambda *e: self.controller.focus_panel('radio')),
('<Alt>L', lambda *e: self.controller.focus_panel('playlists')),
('<Alt>1', lambda *e: self._on_focus_playlist_tab(0)),
('<Alt>2', lambda *e: self._on_focus_playlist_tab(1)),
('<Alt>3', lambda *e: self._on_focus_playlist_tab(2)),
('<Alt>4', lambda *e: self._on_focus_playlist_tab(3)),
('<Alt>5', lambda *e: self._on_focus_playlist_tab(4)),
('<Alt>6', lambda *e: self._on_focus_playlist_tab(5)),
('<Alt>7', lambda *e: self._on_focus_playlist_tab(6)),
('<Alt>8', lambda *e: self._on_focus_playlist_tab(7)),
('<Alt>9', lambda *e: self._on_focus_playlist_tab(8)),
('<Alt>0', lambda *e: self._on_focus_playlist_tab(9)),
)
self.accel_group = Gtk.AccelGroup()
for key, function in hotkeys:
key, mod = Gtk.accelerator_parse(key)
self.accel_group.connect(key, mod, Gtk.AccelFlags.VISIBLE,
function)
self.window.add_accel_group(self.accel_group)
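        # Note (sketch): Gtk.accelerator_parse() turns each string above into a
        # (keyval, modifier mask) pair, e.g. '<Control>P' becomes the keyval
        # for 'p' combined with Gdk.ModifierType.CONTROL_MASK.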
def _setup_widgets(self):
"""
Sets up the various widgets
"""
# TODO: Maybe make this stackable
self.message = dialogs.MessageBar(
parent=self.builder.get_object('player_box'),
buttons=Gtk.ButtonsType.CLOSE
)
self.message.connect('response', self.on_messagebar_response)
self.info_area = MainWindowTrackInfoPane(player.PLAYER)
self.info_area.set_auto_update(True)
self.info_area.set_padding(3, 3, 3, 3)
self.info_area.hide()
self.info_area.set_no_show_all(True)
guiutil.gtk_widget_replace(self.builder.get_object('info_area'), self.info_area)
self.volume_control = playback.VolumeControl(player.PLAYER)
self.info_area.get_action_area().pack_end(self.volume_control, False, False, 0)
self.alpha_style = None
if settings.get_option('gui/use_alpha', False):
screen = self.window.get_screen()
visual = screen.get_rgba_visual()
self.window.set_visual(visual)
self.window.connect('screen-changed', self.on_screen_changed)
self.alpha_style = Gtk.CssProvider.new()
self.window.get_style_context().add_provider(self.alpha_style,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self._update_alpha()
playlist_area = self.builder.get_object('playlist_area')
self.playlist_container = PlaylistContainer('saved_tabs', player.PLAYER)
for notebook in self.playlist_container.notebooks:
notebook.connect_after('switch-page', self.on_playlist_container_switch_page)
page = notebook.get_current_tab()
if page is not None:
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
playlist_area.pack_start(self.playlist_container, True, True, 3)
self.splitter = self.builder.get_object('splitter')
# In most (all?) RTL locales, the playback controls should still be LTR.
# Just in case that's not always the case, we provide a hidden option to
# force RTL layout instead. This can be removed once we're more certain
# that the default behavior (always LTR) is correct.
controls_direction = Gtk.TextDirection.RTL \
if settings.get_option('gui/rtl_playback_controls') \
else Gtk.TextDirection.LTR
self.play_image = Gtk.Image.new_from_icon_name('media-playback-start',
Gtk.IconSize.SMALL_TOOLBAR)
self.play_image.set_direction(controls_direction)
self.pause_image = Gtk.Image.new_from_icon_name('media-playback-pause',
Gtk.IconSize.SMALL_TOOLBAR)
self.pause_image.set_direction(controls_direction)
play_toolbar = self.builder.get_object('play_toolbar')
play_toolbar.set_direction(controls_direction)
for button in ('playpause', 'next', 'prev', 'stop'):
widget = self.builder.get_object('%s_button' % button)
setattr(self, '%s_button' % button, widget)
widget.get_child().set_direction(controls_direction)
self.progress_bar = playback.SeekProgressBar(player.PLAYER)
self.progress_bar.get_child().set_direction(controls_direction)
# Don't expand vertically; looks awful on Adwaita.
self.progress_bar.set_valign(Gtk.Align.CENTER)
guiutil.gtk_widget_replace(
self.builder.get_object('playback_progressbar_dummy'),
self.progress_bar
)
self.stop_button.toggle_spat = False
self.stop_button.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.stop_button.connect('motion-notify-event',
self.on_stop_button_motion_notify_event)
self.stop_button.connect('leave-notify-event',
self.on_stop_button_leave_notify_event)
self.stop_button.connect('key-press-event',
self.on_stop_button_key_press_event)
self.stop_button.connect('key-release-event',
self.on_stop_button_key_release_event)
self.stop_button.connect('focus-out-event',
self.on_stop_button_focus_out_event)
self.stop_button.connect('button-press-event',
self.on_stop_button_press_event)
self.stop_button.connect('button-release-event',
self.on_stop_button_release_event)
self.stop_button.drag_dest_set(Gtk.DestDefaults.ALL,
[Gtk.TargetEntry.new("exaile-index-list", Gtk.TargetFlags.SAME_APP, 0)], Gdk.DragAction.COPY)
self.stop_button.connect('drag-motion',
self.on_stop_button_drag_motion)
self.stop_button.connect('drag-leave',
self.on_stop_button_drag_leave)
self.stop_button.connect('drag-data-received',
self.on_stop_button_drag_data_received)
self.statusbar = info.Statusbar(self.builder.get_object('status_bar'))
event.add_ui_callback(self.on_exaile_loaded, 'exaile_loaded')
def _connect_events(self):
"""
Connects the various events to their handlers
"""
self.builder.connect_signals({
'on_configure_event': self.configure_event,
'on_window_state_event': self.window_state_change_event,
'on_delete_event': self.on_delete_event,
'on_playpause_button_clicked': self._on_playpause_button,
'on_next_button_clicked':
lambda *e: player.QUEUE.next(),
'on_prev_button_clicked':
lambda *e: player.QUEUE.prev(),
'on_about_item_activate': self.on_about_item_activate,
# Controller
# 'on_scan_collection_item_activate': self.controller.on_rescan_collection,
# 'on_device_manager_item_activate': lambda *e: self.controller.show_devices(),
# 'on_track_properties_activate':self.controller.on_track_properties,
})
event.add_ui_callback(self.on_playback_resume, 'playback_player_resume',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_player_end',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playback_start, 'playback_track_start',
player.PLAYER)
event.add_ui_callback(self.on_toggle_pause, 'playback_toggle_pause',
player.PLAYER)
event.add_ui_callback(self.on_track_tags_changed, 'track_tags_changed')
event.add_ui_callback(self.on_buffering, 'playback_buffering',
player.PLAYER)
event.add_ui_callback(self.on_playback_error, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playlist_tracks_added,
'playlist_tracks_added')
event.add_ui_callback(self.on_playlist_tracks_removed,
'playlist_tracks_removed')
# Settings
self._on_option_set('gui_option_set', settings, 'gui/show_info_area')
self._on_option_set('gui_option_set', settings, 'gui/show_info_area_covers')
event.add_ui_callback(self._on_option_set, 'option_set')
def _connect_panel_events(self):
"""
Sets up panel events
"""
# When there's nothing in the notebook, hide it
self.controller.panel_notebook.connect('page-added', self.on_panel_notebook_add_page)
self.controller.panel_notebook.connect('page-removed', self.on_panel_notebook_remove_page)
# panels
panels = self.controller.panel_notebook.panels
for panel_name in ('playlists', 'radio', 'files', 'collection'):
panel = panels[panel_name].panel
sort = False
if panel_name in ('files', 'collection'):
sort = True
panel.connect('append-items', lambda panel, items, force_play, sort=sort:
self.on_append_items(items, force_play, sort=sort))
panel.connect('queue-items', lambda panel, items, sort=sort:
self.on_append_items(items, queue=True, sort=sort))
panel.connect('replace-items', lambda panel, items, sort=sort:
self.on_append_items(items, replace=True, sort=sort))
## Collection Panel
panel = panels['collection'].panel
panel.connect('collection-tree-loaded', self.on_collection_tree_loaded)
## Playlist Panel
panel = panels['playlists'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Radio Panel
panel = panels['radio'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Files Panel
#panel = panels['files']
def _update_alpha(self):
if self.alpha_style is None:
return
opac = 1.0 - float(settings.get_option('gui/transparency'))
self.alpha_style.load_from_data(
'.background { ' +
('background-color: alpha(@theme_bg_color, %s);' % opac) +
'}'
)
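        # Example of the generated CSS (illustrative): with gui/transparency at
        # 0.25 the provider is loaded with
        #   .background { background-color: alpha(@theme_bg_color, 0.75);}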
def do_get_property(self, prop):
if prop.name == 'is-fullscreen':
return self._fullscreen
else:
return GObject.GObject.do_get_property(self, prop)
def do_set_property(self, prop, value):
if prop.name == 'is-fullscreen':
if value:
self.window.fullscreen()
else:
self.window.unfullscreen()
else:
GObject.GObject.do_set_property(self, prop, value)
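        # Usage sketch: other components can toggle fullscreen through the
        # GObject property, e.g. main_window.set_property('is-fullscreen', True),
        # which is routed through do_set_property() above and ends up calling
        # Gtk.Window.fullscreen() or unfullscreen().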
def on_screen_changed(self, widget, event):
"""
            Updates the visual on screen change
"""
screen = widget.get_screen()
visual = screen.get_rgba_visual() or screen.get_rgb_visual()
self.window.set_visual(visual)
def on_messagebar_response(self, widget, response):
"""
Hides the messagebar if requested
"""
if response == Gtk.ResponseType.CLOSE:
widget.hide()
def on_panel_notebook_add_page(self, notebook, page, page_num):
if self.splitter.get_child1() is None:
self.splitter.pack1(self.controller.panel_notebook)
self.controller.panel_notebook.get_parent() \
.child_set_property(self.controller.panel_notebook, 'shrink', False)
def on_panel_notebook_remove_page(self, notebook, page, page_num):
if notebook.get_n_pages() == 0:
self.splitter.remove(self.controller.panel_notebook)
def on_stop_button_motion_notify_event(self, widget, event):
"""
Sets the hover state and shows SPAT icon
"""
widget.__hovered = True
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
else:
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_leave_notify_event(self, widget, event):
"""
Unsets the hover state and resets the button icon
"""
        widget._hovered = False
        if not widget.is_focus() and \
                not (event.get_state() & Gdk.ModifierType.SHIFT_MASK):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_key_press_event(self, widget, event):
"""
Shows SPAT icon on Shift key press
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = True
if event.keyval in (Gdk.KEY_space, Gdk.KEY_Return):
if widget.toggle_spat:
self.on_spat_clicked()
else:
player.PLAYER.stop()
def on_stop_button_key_release_event(self, widget, event):
"""
Resets the button icon
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = False
def on_stop_button_focus_out_event(self, widget, event):
"""
Resets the button icon unless
the button is still hovered
"""
        if not getattr(widget, '_hovered', False):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_press_event(self, widget, event):
"""
Called when the user clicks on the stop button
"""
if event.button == 1:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.on_spat_clicked()
elif event.button == 3:
menu = guiutil.Menu()
menu.append(_("Toggle: Stop after Selected Track"),
self.on_spat_clicked,
'process-stop')
menu.popup(None, None, None, None, event.button, event.time)
def on_stop_button_release_event(self, widget, event):
"""
Called when the user releases the mouse from the stop button
"""
rect = widget.get_allocation()
if 0 <= event.x < rect.width and 0 <= event.y < rect.height:
player.PLAYER.stop()
def on_stop_button_drag_motion(self, widget, context, x, y, time):
"""
Indicates possible SPAT during drag motion of tracks
"""
target = widget.drag_dest_find_target(context, widget.drag_dest_get_target_list()).name()
if target == 'exaile-index-list':
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_leave(self, widget, context, time):
"""
Resets the stop button
"""
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_data_received(self, widget, context, x, y, selection, info, time):
"""
Allows for triggering the SPAT feature
by dropping tracks on the stop button
"""
source_widget = Gtk.drag_get_source_widget(context)
if selection.target.name() == 'exaile-index-list' and isinstance(source_widget, PlaylistView):
position = int(selection.data.split(',')[0])
if position == source_widget.playlist.spat_position:
position = -1
source_widget.playlist.spat_position = position
source_widget.queue_draw()
def on_spat_clicked(self, *e):
"""
Called when the user clicks on the SPAT item
"""
trs = self.get_selected_page().view.get_selected_items()
if not trs: return
# TODO: this works, but implement this some other way in the future
if player.QUEUE.current_playlist.spat_position == -1:
player.QUEUE.current_playlist.spat_position = trs[0][0]
else:
player.QUEUE.current_playlist.spat_position = -1
self.get_selected_page().view.queue_draw()
def on_append_items(self, tracks, force_play=False, queue=False, sort=False, replace=False):
"""
Called when a panel (or other component)
has tracks to append and possibly queue
:param tracks: The tracks to append
:param force_play: Force playing the first track if there
is no track currently playing. Otherwise
check a setting to determine whether the
track should be played
:param queue: Additionally queue tracks
:param sort: Sort before adding
:param replace: Clear playlist before adding
"""
if len(tracks) == 0:
return
page = self.get_selected_page()
if sort:
tracks = trax.sort_tracks(
('artist', 'date', 'album', 'discnumber', 'tracknumber'),
tracks)
if replace:
page.playlist.clear()
offset = len(page.playlist)
page.playlist.extend(tracks)
# extending the queue automatically starts playback
if queue:
if player.QUEUE is not page.playlist:
player.QUEUE.extend(tracks)
        elif (force_play or settings.get_option('playlist/append_menu_starts_playback', False)) and \
not player.PLAYER.current:
page.view.play_track_at(offset, tracks[0])
def on_playback_error(self, type, player, message):
"""
Called when there has been a playback error
"""
self.message.show_error(_('Playback error encountered!'), message)
def on_buffering(self, type, player, percent):
"""
Called when a stream is buffering
"""
percent = min(percent, 100)
self.statusbar.set_status(_("Buffering: %d%%...") % percent, 1)
def on_track_tags_changed(self, type, track, tag):
"""
Called when tags are changed
"""
if track is player.PLAYER.current:
self._update_track_information()
def on_collection_tree_loaded(self, tree):
"""
Updates information on collection tree load
"""
self.statusbar.update_info()
def on_exaile_loaded(self, event_type, exaile, nothing):
"""
Updates information on exaile load
"""
self.statusbar.update_info()
event.remove_callback(self.on_exaile_loaded, 'exaile_loaded')
def on_playlist_tracks_added(self, type, playlist, tracks):
"""
Updates information on track add
"""
self.statusbar.update_info()
def on_playlist_tracks_removed(self, type, playlist, tracks):
"""
Updates information on track removal
"""
self.statusbar.update_info()
def on_toggle_pause(self, type, player, object):
"""
Called when the user clicks the play button after playback has
already begun
"""
if player.is_paused():
image = self.play_image
tooltip = _('Continue Playback')
else:
image = self.pause_image
tooltip = _('Pause Playback')
self.playpause_button.set_image(image)
self.playpause_button.set_tooltip_text(tooltip)
self._update_track_information()
def on_playlist_container_switch_page(self, notebook, page, page_num):
"""
Updates info after notebook page switch
"""
page = notebook.get_nth_page(page_num)
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
self.statusbar.update_info()
def on_playlist_view_selection_changed(self, selection):
"""
Updates info after playlist page selection change
"""
self.statusbar.update_info()
def on_panel_filter_focus(self, *e):
"""
Gives focus to the filter field of the current panel
"""
try:
self.controller.get_active_panel().filter.grab_focus()
except (AttributeError, KeyError):
pass
def on_search_playlist_focus(self, *e):
"""
Gives focus to the playlist search bar
"""
plpage = get_selected_playlist()
if plpage:
plpage.get_search_entry().grab_focus()
def on_save_playlist(self, *e):
"""
Called when the user presses Ctrl+S
Spawns the save dialog of the currently selected playlist tab if
not custom, saves changes directly if custom
"""
tab = self.get_selected_tab()
if not tab: return
if tab.page.playlist.get_is_custom():
tab.do_save_changes_to_custom()
else:
tab.do_save_custom()
def on_save_playlist_as(self, *e):
"""
        Called when the user activates the 'Save Playlist As' action
Spawns the save as dialog of the current playlist tab
"""
tab = self.get_selected_tab()
if not tab: return
tab.do_save_custom()
def on_clear_playlist(self, *e):
"""
Clears the current playlist tab
"""
page = self.get_selected_page()
if page:
page.playlist.clear()
def on_open_item_activate(self, menuitem):
"""
Shows a dialog to open media
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.MediaOpenDialog(self.window)
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_open_url_item_activate(self, menuitem):
"""
        Shows a dialog to open a URI
"""
def on_uri_selected(dialog, uri):
self.controller.open_uri(uri, play=False)
dialog = dialogs.URIOpenDialog(self.window)
dialog.connect('uri-selected', on_uri_selected)
dialog.show()
def on_open_directories_item_activate(self, menuitem):
"""
Shows a dialog to open directories
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.DirectoryOpenDialog(self.window)
# Selecting empty folders is useless
dialog.props.create_folders = False
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_export_current_playlist_activate(self, menuitem):
"""
Shows a dialog to export the current playlist
"""
page = self.get_selected_page()
if not page or not isinstance(page, PlaylistPage):
return
def on_message(dialog, message_type, message):
"""
Show messages in the main window message area
"""
if message_type == Gtk.MessageType.INFO:
self.message.show_info(markup=message)
elif message_type == Gtk.MessageType.ERROR:
self.message.show_error(_('Playlist export failed!'), message)
return True
dialog = dialogs.PlaylistExportDialog(page.playlist, self.window)
dialog.connect('message', on_message)
dialog.show()
def on_playlist_utilities_bar_visible_toggled(self, checkmenuitem):
"""
Shows or hides the playlist utilities bar
"""
settings.set_option('gui/playlist_utilities_bar_visible',
checkmenuitem.get_active())
def on_show_playing_track_item_activate(self, menuitem):
"""
Tries to show the currently playing track
"""
self.playlist_container.show_current_track()
def on_about_item_activate(self, menuitem):
"""
Shows the about dialog
"""
dialog = dialogs.AboutDialog(self.window)
dialog.show()
def on_playback_resume(self, type, player, data):
self.resuming = True
def on_playback_start(self, type, player, object):
"""
Called when playback starts
Sets the currently playing track visible in the currently selected
playlist if the user has chosen this setting
"""
if self.resuming:
self.resuming = False
return
self._update_track_information()
self.playpause_button.set_image(self.pause_image)
self.playpause_button.set_tooltip_text(_('Pause Playback'))
def on_playback_end(self, type, player, object):
"""
Called when playback ends
"""
self.window.set_title('Exaile')
self.playpause_button.set_image(self.play_image)
self.playpause_button.set_tooltip_text(_('Start Playback'))
def _on_option_set(self, name, object, option):
"""
Handles changes of settings
"""
if option == 'gui/main_window_title_format':
self.title_formatter.props.format = settings.get_option(
option, self.title_formatter.props.format)
elif option == 'gui/use_tray':
usetray = settings.get_option(option, False)
if self.controller.tray_icon and not usetray:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
elif not self.controller.tray_icon and usetray:
self.controller.tray_icon = tray.TrayIcon(self)
elif option == 'gui/show_info_area':
self.info_area.set_no_show_all(False)
if settings.get_option(option, True):
self.info_area.show_all()
else:
self.info_area.hide()
self.info_area.set_no_show_all(True)
elif option == 'gui/show_info_area_covers':
cover = self.info_area.cover
cover.set_no_show_all(False)
if settings.get_option(option, True):
cover.show_all()
else:
cover.hide()
cover.set_no_show_all(True)
elif option == 'gui/transparency':
self._update_alpha()
def _on_volume_key(self, is_up):
        diff = int(100 * settings.get_option('gui/volume_key_step_size', VOLUME_STEP_DEFAULT))
if not is_up: diff = -diff
player.PLAYER.modify_volume(diff)
return True
def _on_seek_key(self, is_forward):
diff = settings.get_option('gui/seek_key_step_size', SEEK_STEP_DEFAULT)
if not is_forward: diff = -diff
if player.PLAYER.current:
player.PLAYER.modify_time(diff)
self.progress_bar.update_progress()
return True
def _on_prev_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_prev_tab()
return True
def _on_next_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_next_tab()
return True
def _on_playpause_button(self, *e):
self.playpause()
return True
def _on_focus_playlist_tab(self, tab_nr):
self.playlist_container.get_current_notebook().focus_tab(tab_nr)
return True
def _on_focus_playlist_container(self, *_e):
self.playlist_container.focus()
return True
def _update_track_information(self):
"""
Sets track information
"""
track = player.PLAYER.current
if not track:
return
self.window.set_title(self.title_formatter.format(track))
def playpause(self):
"""
Pauses the playlist if it is playing, starts playing if it is
paused. If stopped, try to start playing the next suitable track.
"""
if player.PLAYER.is_paused() or player.PLAYER.is_playing():
player.PLAYER.toggle_pause()
else:
pl = self.get_selected_page()
player.QUEUE.set_current_playlist(pl.playlist)
try:
trackpath = pl.view.get_selected_paths()[0]
pl.playlist.current_position = trackpath[0]
except IndexError:
pass
player.QUEUE.play(track=pl.playlist.current)
def _setup_position(self):
"""
        Sets up the position and size based on the size the window had
when it was last moved or resized
"""
if settings.get_option('gui/mainw_maximized', False):
self.window.maximize()
width = settings.get_option('gui/mainw_width', 500)
height = settings.get_option('gui/mainw_height', 475)
x = settings.get_option('gui/mainw_x', 10)
y = settings.get_option('gui/mainw_y', 10)
self.window.move(x, y)
self.window.resize(width, height)
pos = settings.get_option('gui/mainw_sash_pos', 200)
self.splitter.set_position(pos)
def on_delete_event(self, *e):
"""
Called when the user attempts to close the window
"""
sash_pos = self.splitter.get_position()
if sash_pos > 10:
settings.set_option('gui/mainw_sash_pos', sash_pos)
if settings.get_option('gui/use_tray', False) and \
settings.get_option('gui/close_to_tray', False):
self.window.hide()
else:
self.quit()
return True
def quit(self, *e):
"""
Quits Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit)
return True
def on_restart_item_activate(self, menuitem):
"""
Restarts Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit, True)
def toggle_visible(self, bringtofront=False):
"""
Toggles visibility of the main window
"""
toggle_handled = self.emit('main-visible-toggle')
if not toggle_handled:
if bringtofront and self.window.is_active() or \
not bringtofront and self.window.get_property('visible'):
self.window.hide()
else:
# the ordering for deiconify/show matters -- if this gets
# switched, then the minimization detection breaks
self.window.deiconify()
self.window.show()
def configure_event(self, *e):
"""
Called when the window is resized or moved
"""
# Don't save window size if it is maximized or fullscreen.
if settings.get_option('gui/mainw_maximized', False) or \
self._fullscreen:
return False
(width, height) = self.window.get_size()
if [width, height] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["width", "height"] ]:
settings.set_option('gui/mainw_height', height, save=False)
settings.set_option('gui/mainw_width', width, save=False)
(x, y) = self.window.get_position()
if [x, y] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["x", "y"] ]:
settings.set_option('gui/mainw_x', x, save=False)
settings.set_option('gui/mainw_y', y, save=False)
return False
def window_state_change_event(self, window, event):
"""
Saves the current maximized and fullscreen
states and minimizes to tray if requested
"""
if event.changed_mask & Gdk.WindowState.MAXIMIZED:
settings.set_option('gui/mainw_maximized',
bool(event.new_window_state & Gdk.WindowState.MAXIMIZED))
if event.changed_mask & Gdk.WindowState.FULLSCREEN:
self._fullscreen = bool(event.new_window_state & Gdk.WindowState.FULLSCREEN)
self.notify('is-fullscreen')
# detect minimization state changes
prev_minimized = self.minimized
if not self.minimized:
if event.changed_mask & Gdk.WindowState.ICONIFIED and \
not event.changed_mask & Gdk.WindowState.WITHDRAWN and \
event.new_window_state & Gdk.WindowState.ICONIFIED and \
not event.new_window_state & Gdk.WindowState.WITHDRAWN and \
not self.window_state & Gdk.WindowState.ICONIFIED:
self.minimized = True
else:
if event.changed_mask & Gdk.WindowState.WITHDRAWN and \
not event.new_window_state & (Gdk.WindowState.WITHDRAWN): #and \
self.minimized = False
# track this
self.window_state = event.new_window_state
if settings.get_option('gui/minimize_to_tray', False):
# old code to detect minimization
# -> it must have worked at some point, perhaps this is a GTK version
# specific set of behaviors? Current code works now on 2.24.17
#if wm_state is not None:
# if '_NET_WM_STATE_HIDDEN' in wm_state[2]:
# show tray
# window.hide
#else
# destroy tray
if self.minimized != prev_minimized and self.minimized == True:
if not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is None:
self.controller.tray_icon = tray.TrayIcon(self)
window.hide()
elif not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is not None:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
return False
def get_selected_page(self):
"""
        Returns the currently displayed playlist notebook page
"""
return self.playlist_container.get_current_tab()
def get_selected_playlist(self):
try:
page = self.get_selected_page()
except AttributeError:
return None
if not isinstance(page, PlaylistPage):
return None
return page
class MainWindowTrackInfoPane(info.TrackInfoPane, providers.ProviderHandler):
"""
Extends the regular track info pane by an area for custom widgets
The mainwindow-info-area-widget provider is used to show widgets
on the right of the info area. They should be small. The registered
provider should provide a method 'create_widget' that takes the info
area instance as a parameter, and that returns a Gtk.Widget to be
inserted into the widget_area of the info area, and an attribute
'name' that will be used when removing the provider.
"""
def __init__(self, player):
info.TrackInfoPane.__init__(self, player)
self.__player = player
self.widget_area = Gtk.Box()
self.get_child().pack_start(self.widget_area, False, False, 0)
self.__widget_area_widgets = {}
# call this last if we're using simple_init=True
providers.ProviderHandler.__init__(self, 'mainwindow-info-area-widget',
target=player, simple_init=True)
def get_player(self):
'''
Retrieves the player object that this info area
is associated with
'''
return self._TrackInfoPane__player
def on_provider_added(self, provider):
name = provider.name
widget = provider.create_widget(self)
old_widget = self.__widget_area_widgets.get(name)
if old_widget is not None:
self.widget_area.remove(old_widget)
old_widget.destroy()
self.__widget_area_widgets[name] = widget
self.widget_area.pack_start(widget, False, False, 0)
widget.show_all()
def on_provider_removed(self, provider):
widget = self.__widget_area_widgets.pop(provider.name, None)
if widget is not None:
self.widget_area.remove(widget)
widget.destroy()
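# Illustrative sketch (editor's addition, not part of Exaile): a minimal
# provider for the 'mainwindow-info-area-widget' service described in the
# MainWindowTrackInfoPane docstring. The class name and label text are
# hypothetical, and registration via providers.register is assumed to be
# the usual way such providers are published.
#
#     class ExampleInfoAreaWidgetProvider(object):
#         name = 'example-info-area-widget'
#
#         def create_widget(self, info_area):
#             # info_area.get_player() gives access to the associated player
#             return Gtk.Label(label='hello from the info area')
#
#     providers.register('mainwindow-info-area-widget',
#                        ExampleInfoAreaWidgetProvider())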
def get_playlist_container():
return MainWindow._mainwindow.playlist_container
def get_playlist_notebook():
'''Retrieves the primary playlist notebook'''
return MainWindow._mainwindow.playlist_container.notebooks[0]
def get_selected_page():
return MainWindow._mainwindow.get_selected_page()
def get_selected_playlist():
return MainWindow._mainwindow.get_selected_playlist()
def mainwindow():
return MainWindow._mainwindow
# vim: et sts=4 sw=4
| gpl-2.0 | 3,044,205,755,523,633,000 | 3,711,436,791,177,421,000 | 35.930918 | 105 | 0.589593 | false |
chvogl/tardis | tardis/io/config_reader.py | 1 | 40145 | # Module to read the rather complex config data
import logging
import os
import pprint
from astropy import constants, units as u
import numpy as np
import pandas as pd
import yaml
import tardis
from tardis.io.model_reader import read_density_file, \
calculate_density_after_time, read_abundances_file
from tardis.io.config_validator import ConfigurationValidator
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, \
element_symbol2atomic_number
import copy
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.join(tardis.__path__[0], 'data')
default_config_definition_file = os.path.join(data_dir,
'tardis_config_definition.yml')
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
"""
parse a dictionary of the following kind
{'start': 5000 km/s,
'stop': 10000 km/s,
'num': 1000}
Parameters
----------
quantity_linspace_dictionary: ~dict
add_one: boolean, default: True
Returns
-------
~np.array
"""
start = parse_quantity(quantity_linspace_dictionary['start'])
stop = parse_quantity(quantity_linspace_dictionary['stop'])
try:
stop = stop.to(start.unit)
except u.UnitsError:
raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
num = quantity_linspace_dictionary['num']
if add_one:
num += 1
return np.linspace(start.value, stop.value, num=num) * start.unit
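# Illustrative sketch (editor's addition): what parse_quantity_linspace
# returns for a made-up velocity section. Because add_one defaults to True,
# the result has num + 1 grid points (cell boundaries rather than cells).
#
#     parse_quantity_linspace({'start': '1.1e4 km/s',
#                              'stop': '2e4 km/s',
#                              'num': 20})
#     # -> <Quantity [11000., 11450., ..., 20000.] km / s>  (21 points)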
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)
return spectrum_start_wavelength, spectrum_end_wavelength
def calculate_exponential_density(velocities, v_0, rho0):
"""
This function computes the exponential density profile.
:math:`\\rho = \\rho_0 \\times \\exp \\left( -\\frac{v}{v_0} \\right)`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
    v_0 : ~astropy.Quantity
reference velocity
rho0 : ~astropy.Quantity
reference density
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho0 * np.exp(-(velocities / v_0))
return densities
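# Illustrative sketch (editor's addition): a worked value for the exponential
# profile above; the reference quantities are made up.
#
#     calculate_exponential_density(16000 * u.km / u.s,
#                                   v_0=8000 * u.km / u.s,
#                                   rho0=4e-13 * u.g / u.cm ** 3)
#     # -> rho0 * exp(-2) ~ 5.4e-14 g / cm^3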
def calculate_power_law_density(velocities, velocity_0, rho_0, exponent):
"""
    This function computes a discrete power-law density profile.
:math:`\\rho = \\rho_0 \\times \\left( \\frac{v}{v_0} \\right)^n`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
velocity_0 : ~astropy.Quantity
reference velocity
    rho_0 : ~astropy.Quantity
reference density
exponent : ~float
exponent used in the powerlaw
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho_0 * np.power((velocities / velocity_0), exponent)
return densities
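# Illustrative sketch (editor's addition): the analogous worked value for the
# power-law profile, again with made-up numbers.
#
#     calculate_power_law_density(16000 * u.km / u.s,
#                                 velocity_0=8000 * u.km / u.s,
#                                 rho_0=4e-13 * u.g / u.cm ** 3,
#                                 exponent=-7)
#     # -> rho_0 * 2 ** -7 ~ 3.1e-15 g / cm^3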
def parse_model_file_section(model_setup_file_dict, time_explosion):
def parse_artis_model_setup_files(model_file_section_dict, time_explosion):
###### Reading the structure part of the ARTIS file pair
structure_fname = model_file_section_dict['structure_fname']
for i, line in enumerate(file(structure_fname)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time
velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
no_of_shells, sum(masses.value))
if 'v_lowest' in model_file_section_dict:
v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in model_file_section_dict:
v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
artis_model = artis_model[min_shell:max_shell]
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
###### Reading the abundance part of the ARTIS file pair
abundances_fname = model_file_section_dict['abundances_fname']
abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
index=np.arange(1, 31))
ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
co_stable = abundances.ix[27] - artis_model['co56_fraction']
fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
mn_stable = abundances.ix[25] - 0.0
cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
v_stable = abundances.ix[23] - 0.0
ti_stable = abundances.ix[22] - 0.0
abundances.ix[28] = ni_stable
abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
-(time_explosion * inv_ni56_efolding_time).to(1).value)
abundances.ix[27] = co_stable
abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
-(time_explosion * inv_co56_efolding_time).to(1).value)
abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
(inv_ni56_efolding_time - inv_co56_efolding_time)) * \
(np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
abundances.ix[26] = fe_stable
abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
-(time_explosion * inv_fe52_efolding_time).to(1).value)
abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
- artis_model['co56_fraction'] * inv_co56_efolding_time
+ artis_model['ni56_fraction'] * inv_ni56_efolding_time
- artis_model['ni56_fraction'] * inv_co56_efolding_time
- artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
- artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
/ (inv_ni56_efolding_time - inv_co56_efolding_time))
abundances.ix[25] = mn_stable
abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
(inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
(np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
abundances.ix[24] = cr_stable
abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
-(time_explosion * inv_cr48_efolding_time).to(1).value)
abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
- artis_model['fe52_fraction'] * inv_mn52_efolding_time
- artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
-(inv_mn52_efolding_time * time_explosion).to(1).value)
+ artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
/ (inv_fe52_efolding_time - inv_mn52_efolding_time))
abundances.ix[23] = v_stable
abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
(inv_cr48_efolding_time - inv_v48_efolding_time)) * \
(np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
abundances.ix[22] = ti_stable
abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
- artis_model['cr48_fraction'] * inv_v48_efolding_time
- artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
-(inv_v48_efolding_time * time_explosion).to(1).value)
+ artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
/ (inv_cr48_efolding_time - inv_v48_efolding_time))
if 'split_shells' in model_file_section_dict:
split_shells = int(model_file_section_dict['split_shells'])
else:
split_shells = 1
if split_shells > 1:
logger.info('Increasing the number of shells by a factor of %s' % split_shells)
no_of_shells = len(v_inner)
velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
v_inner = velocities[:-1]
v_outer = velocities[1:]
old_mean_densities = mean_densities
mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
for i in xrange(split_shells):
mean_densities[i::split_shells] = old_mean_densities
new_abundance_data[:, i::split_shells] = abundances.values
abundances = pd.DataFrame(new_abundance_data, index=abundances.index)
#def parser_simple_ascii_model
return v_inner, v_outer, mean_densities, abundances
model_file_section_parser = {}
model_file_section_parser['artis'] = parse_artis_model_setup_files
try:
parser = model_file_section_parser[model_setup_file_dict['type']]
except KeyError:
        raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
                                 (model_file_section_parser.keys(), model_setup_file_dict['type']))
return parser(model_setup_file_dict, time_explosion)
def parse_density_file_section(density_file_dict, time_explosion):
density_file_parser = {}
def parse_artis_density(density_file_dict, time_explosion):
density_file = density_file_dict['name']
for i, line in enumerate(file(density_file)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time
velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)
min_shell = 1
max_shell = no_of_shells
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
no_of_shells, sum(masses.value))
if 'v_lowest' in density_file_dict:
v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in density_file_dict:
v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
return v_inner, v_outer, mean_densities, min_shell, max_shell
density_file_parser['artis'] = parse_artis_density
try:
parser = density_file_parser[density_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(density_file_parser.keys(), density_file_dict['type']))
return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
density_parser = {}
#Parse density uniform
def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
no_of_shells = len(v_inner)
return density_dict['value'].to('g cm^-3') * np.ones(no_of_shells)
density_parser['uniform'] = parse_uniform
#Parse density branch85 w7
def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities,
density_dict['w7_v_0'],
density_dict['w7_rho_0'], -7)
densities = calculate_density_after_time(densities,
density_dict['w7_time_0'],
time_explosion)
return densities
density_parser['branch85_w7'] = parse_branch85
def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
exponent = density_dict.pop('exponent')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities, v_0, rho_0, exponent)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['power_law'] = parse_power_law
def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_exponential_density(velocities, v_0, rho_0)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['exponential'] = parse_exponential
try:
parser = density_parser[density_dict['type']]
except KeyError:
raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
(density_parser.keys(), density_dict['type']))
return parser(density_dict, v_inner, v_outer, time_explosion)
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
abundance_file_parser = {}
def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
#### ---- debug ----
time_of_model = 0.0
####
fname = abundance_file_dict['name']
max_atom = 30
logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
return abundances
abundance_file_parser['artis'] = parse_artis
try:
parser = abundance_file_parser[abundance_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(abundance_file_parser.keys(), abundance_file_dict['type']))
return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
"""
Parse the supernova section
Parameters
----------
supernova_dict: dict
YAML parsed supernova dict
Returns
-------
config_dict: dict
"""
config_dict = {}
#parse luminosity
luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
if luminosity_unit == 'log_lsun':
config_dict['luminosity_requested'] = 10 ** (
float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
else:
config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')
config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')
if 'distance' in supernova_dict:
config_dict['distance'] = parse_quantity(supernova_dict['distance'])
else:
config_dict['distance'] = None
if 'luminosity_wavelength_start' in supernova_dict:
config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_end'] = np.inf * u.Hz
if 'luminosity_wavelength_end' in supernova_dict:
config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_start'] = 0.0 * u.Hz
return config_dict
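# Illustrative sketch (editor's addition): a minimal supernova section and the
# values the parser above derives from it (the numbers are made up).
#
#     parse_supernova_section({'luminosity_requested': '9.44 log_lsun',
#                              'time_explosion': '13 day'})
#     # -> luminosity_requested ~ 1.06e43 erg / s  (10**9.44 * L_sun)
#     #    time_explosion       = 1123200.0 s      (13 days)
#     #    distance             = None
#     #    luminosity_nu_start  = 0.0 Hz, luminosity_nu_end = inf Hz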
def parse_spectrum_list2dict(spectrum_list):
"""
    Parse the spectrum list [start, stop, num] into a dictionary
"""
if spectrum_list[0].unit.physical_type != 'length' and \
spectrum_list[1].unit.physical_type != 'length':
raise ValueError('start and end of spectrum need to be a length')
spectrum_config_dict = {}
spectrum_config_dict['start'] = spectrum_list[0]
spectrum_config_dict['end'] = spectrum_list[1]
spectrum_config_dict['bins'] = spectrum_list[2]
spectrum_frequency = np.linspace(
spectrum_config_dict['end'].to('Hz', u.spectral()),
spectrum_config_dict['start'].to('Hz', u.spectral()),
num=spectrum_config_dict['bins'] + 1)
spectrum_config_dict['frequency'] = spectrum_frequency
return spectrum_config_dict
def parse_convergence_section(convergence_section_dict):
"""
Parse the convergence section dictionary
Parameters
----------
convergence_section_dict: ~dict
dictionary
"""
convergence_parameters = ['damping_constant', 'threshold', 'fraction',
'hold_iterations']
for convergence_variable in ['t_inner', 't_rad', 'w']:
if convergence_variable not in convergence_section_dict:
convergence_section_dict[convergence_variable] = {}
convergence_variable_section = convergence_section_dict[convergence_variable]
for param in convergence_parameters:
if convergence_variable_section.get(param, None) is None:
if param in convergence_section_dict:
convergence_section_dict[convergence_variable][param] = (
convergence_section_dict[param])
return convergence_section_dict
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
"""
Generated densities from the fit to W7 in Branch 85 page 620 (citation missing)
Parameters
----------
velocities : `~numpy.ndarray`
velocities in cm/s
time_explosion : `float`
time since explosion needed to descale density with expansion
time_0 : `float`
time in seconds of the w7 model - default 19.999, no reason to change
density_coefficient : `float`
coefficient for the polynomial - obtained by fitting to W7, no reason to change
"""
densities = density_coefficient * (velocities * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities[1:]
class ConfigurationNameSpace(dict):
"""
The configuration name space class allows to wrap a dictionary and adds
utility functions for easy access. Accesses like a.b.c are then possible
Code from http://goo.gl/KIaq8I
Parameters
----------
config_dict: ~dict
configuration dictionary
Returns
-------
config_ns: ConfigurationNameSpace
"""
@classmethod
def from_yaml(cls, fname):
"""
Read a configuration from a YAML file
Parameters
----------
fname: str
filename or path
"""
try:
yaml_dict = yaml.load(file(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
return cls.from_config_dict(yaml_dict)
@classmethod
def from_config_dict(cls, config_dict, config_definition_file=None):
"""
Validating a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
return cls(ConfigurationValidator(config_definition,
config_dict).get_config())
marker = object()
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
raise TypeError, 'expected dict'
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value,
ConfigurationNameSpace):
value = ConfigurationNameSpace(value)
if key in self and hasattr(self[key], 'unit'):
value = u.Quantity(value, self[key].unit)
dict.__setitem__(self, key, value)
def __getitem__(self, key):
return super(ConfigurationNameSpace, self).__getitem__(key)
def __getattr__(self, item):
if item in self:
return self[item]
else:
super(ConfigurationNameSpace, self).__getattribute__(item)
__setattr__ = __setitem__
def __dir__(self):
return self.keys()
def get_config_item(self, config_item_string):
"""
Get configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
config_item = config_item_path[0]
if config_item.startswith('item'):
return self[config_item_path[0]]
else:
return self[config_item]
elif len(config_item_path) == 2 and\
config_item_path[1].startswith('item'):
return self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
else:
return self[config_item_path[0]].get_config_item(
'.'.join(config_item_path[1:]))
def set_config_item(self, config_item_string, value):
"""
set configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
value:
value to set the parameter with it
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
self[config_item_path[0]] = value
elif len(config_item_path) == 2 and \
config_item_path[1].startswith('item'):
current_value = self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
if hasattr(current_value, 'unit'):
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] =\
u.Quantity(value, current_value.unit)
else:
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] = value
else:
self[config_item_path[0]].set_config_item(
'.'.join(config_item_path[1:]), value)
def deepcopy(self):
return ConfigurationNameSpace(copy.deepcopy(dict(self)))
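# Illustrative sketch (editor's addition): attribute-style access next to the
# dotted-string helpers defined above. The dictionary content is made up.
#
#     ns = ConfigurationNameSpace({'montecarlo': {'seed': 23111963}})
#     ns.montecarlo.seed                         # -> 23111963
#     ns.get_config_item('montecarlo.seed')      # -> 23111963
#     ns.set_config_item('montecarlo.seed', 42)
#     ns.montecarlo.seed                         # -> 42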
class Configuration(ConfigurationNameSpace):
"""
Tardis configuration class
"""
@classmethod
def from_yaml(cls, fname, test_parser=False):
try:
yaml_dict = yaml.load(open(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
tardis_config_version = yaml_dict.get('tardis_config_version', None)
if tardis_config_version != 'v1.0':
raise ConfigurationError('Currently only tardis_config_version v1.0 supported')
return cls.from_config_dict(yaml_dict, test_parser=test_parser)
@classmethod
def from_config_dict(cls, config_dict, atom_data=None, test_parser=False,
config_definition_file=None, validate=True):
"""
Validating and subsequently parsing a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
atom_data: ~tardis.atomic.AtomData
atom data object. if `None` will be tried to be read from
atom data file path in the config_dict [default=None]
test_parser: ~bool
switch on to ignore a working atom_data, mainly useful for
testing this reader
config_definition_file: ~str
path to config definition file, if `None` will be set to the default
in the `data` directory that ships with TARDIS
validate: ~bool
Turn validation on or off.
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
if validate:
validated_config_dict = ConfigurationValidator(config_definition,
config_dict).get_config()
else:
validated_config_dict = config_dict
#First let's see if we can find an atom_db anywhere:
if test_parser:
atom_data = None
elif 'atom_data' in validated_config_dict.keys():
atom_data_fname = validated_config_dict['atom_data']
validated_config_dict['atom_data_fname'] = atom_data_fname
else:
raise ConfigurationError('No atom_data key found in config or command line')
if atom_data is None and not test_parser:
logger.info('Reading Atomic Data from %s', atom_data_fname)
atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
else:
atom_data = atom_data
#Parsing supernova dictionary
validated_config_dict['supernova']['luminosity_nu_start'] = \
validated_config_dict['supernova']['luminosity_wavelength_end'].to(
u.Hz, u.spectral())
try:
validated_config_dict['supernova']['luminosity_nu_end'] = \
(validated_config_dict['supernova']
['luminosity_wavelength_start'].to(u.Hz, u.spectral()))
except ZeroDivisionError:
validated_config_dict['supernova']['luminosity_nu_end'] = (
np.inf * u.Hz)
validated_config_dict['supernova']['time_explosion'] = (
validated_config_dict['supernova']['time_explosion'].cgs)
validated_config_dict['supernova']['luminosity_requested'] = (
validated_config_dict['supernova']['luminosity_requested'].cgs)
#Parsing the model section
model_section = validated_config_dict['model']
v_inner = None
v_outer = None
mean_densities = None
abundances = None
structure_section = model_section['structure']
if structure_section['type'] == 'specific':
start, stop, num = model_section['structure']['velocity']
num += 1
velocities = np.linspace(start, stop, num)
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = parse_density_section(
model_section['structure']['density'], v_inner, v_outer,
validated_config_dict['supernova']['time_explosion']).cgs
elif structure_section['type'] == 'file':
v_inner, v_outer, mean_densities, inner_boundary_index, \
outer_boundary_index = read_density_file(
structure_section['filename'], structure_section['filetype'],
validated_config_dict['supernova']['time_explosion'],
structure_section['v_inner_boundary'],
structure_section['v_outer_boundary'])
r_inner = validated_config_dict['supernova']['time_explosion'] * v_inner
r_outer = validated_config_dict['supernova']['time_explosion'] * v_outer
r_middle = 0.5 * (r_inner + r_outer)
structure_validated_config_dict = {}
structure_section['v_inner'] = v_inner.cgs
structure_section['v_outer'] = v_outer.cgs
structure_section['mean_densities'] = mean_densities.cgs
no_of_shells = len(v_inner)
structure_section['no_of_shells'] = no_of_shells
structure_section['r_inner'] = r_inner.cgs
structure_section['r_outer'] = r_outer.cgs
structure_section['r_middle'] = r_middle.cgs
structure_section['volumes'] = ((4. / 3) * np.pi * \
(r_outer ** 3 -
r_inner ** 3)).cgs
#### TODO the following is legacy code and should be removed
validated_config_dict['structure'] = \
validated_config_dict['model']['structure']
# ^^^^^^^^^^^^^^^^
abundances_section = model_section['abundances']
if abundances_section['type'] == 'uniform':
abundances = pd.DataFrame(columns=np.arange(no_of_shells),
index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
for element_symbol_string in abundances_section:
if element_symbol_string == 'type': continue
z = element_symbol2atomic_number(element_symbol_string)
abundances.ix[z] = float(abundances_section[element_symbol_string])
elif abundances_section['type'] == 'file':
index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
inner_boundary_index, outer_boundary_index)
if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified does not have the same number of cells '
                                         'as the specified density profile')
abundances = abundances.replace(np.nan, 0.0)
abundances = abundances[abundances.sum(axis=1) > 0]
norm_factor = abundances.sum(axis=0)
if np.any(np.abs(norm_factor - 1) > 1e-12):
logger.warning("Abundances have not been normalized to 1. - normalizing")
abundances /= norm_factor
validated_config_dict['abundances'] = abundances
########### DOING PLASMA SECTION ###############
plasma_section = validated_config_dict['plasma']
if plasma_section['initial_t_inner'] < 0.0 * u.K:
luminosity_requested = validated_config_dict['supernova']['luminosity_requested']
plasma_section['t_inner'] = ((luminosity_requested /
(4 * np.pi * r_inner[0] ** 2 *
constants.sigma_sb)) ** .25).to('K')
logger.info('"initial_t_inner" is not specified in the plasma '
'section - initializing to %s with given luminosity',
plasma_section['t_inner'])
else:
plasma_section['t_inner'] = plasma_section['initial_t_inner']
plasma_section['t_rads'] = np.ones(no_of_shells) * \
plasma_section['initial_t_rad']
if plasma_section['disable_electron_scattering'] is False:
logger.debug("Electron scattering switched on")
validated_config_dict['montecarlo']['sigma_thomson'] = 6.652486e-25 / (u.cm ** 2)
else:
logger.warn('Disabling electron scattering - this is not physical')
validated_config_dict['montecarlo']['sigma_thomson'] = 1e-200 / (u.cm ** 2)
##### NLTE subsection of Plasma start
nlte_validated_config_dict = {}
nlte_species = []
nlte_section = plasma_section['nlte']
nlte_species_list = nlte_section.pop('species')
for species_string in nlte_species_list:
nlte_species.append(species_string_to_tuple(species_string))
nlte_validated_config_dict['species'] = nlte_species
nlte_validated_config_dict['species_string'] = nlte_species_list
nlte_validated_config_dict.update(nlte_section)
if 'coronal_approximation' not in nlte_section:
logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['coronal_approximation'] = False
if 'classical_nebular' not in nlte_section:
logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['classical_nebular'] = False
elif nlte_section: #checks that the dictionary is not empty
logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
pp.pformat(nlte_section))
if not nlte_validated_config_dict:
nlte_validated_config_dict['species'] = []
plasma_section['nlte'] = nlte_validated_config_dict
#^^^^^^^^^^^^^^ End of Plasma Section
##### Monte Carlo Section
montecarlo_section = validated_config_dict['montecarlo']
if montecarlo_section['last_no_of_packets'] < 0:
montecarlo_section['last_no_of_packets'] = \
montecarlo_section['no_of_packets']
default_convergence_section = {'type': 'damped',
'lock_t_inner_cycles': 1,
't_inner_update_exponent': -0.5,
'damping_constant': 0.5}
if montecarlo_section['convergence_strategy'] is None:
logger.warning('No convergence criteria selected - '
'just damping by 0.5 for w, t_rad and t_inner')
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(default_convergence_section))
else:
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(
montecarlo_section['convergence_strategy']))
black_body_section = montecarlo_section['black_body_sampling']
montecarlo_section['black_body_sampling'] = {}
montecarlo_section['black_body_sampling']['start'] = \
black_body_section[0]
montecarlo_section['black_body_sampling']['end'] = \
black_body_section[1]
montecarlo_section['black_body_sampling']['samples'] = \
black_body_section[2]
###### END of convergence section reading
validated_config_dict['spectrum'] = parse_spectrum_list2dict(
validated_config_dict['spectrum'])
return cls(validated_config_dict, atom_data)
def __init__(self, config_dict, atom_data):
super(Configuration, self).__init__(config_dict)
self.atom_data = atom_data
selected_atomic_numbers = self.abundances.index
if atom_data is not None:
self.number_densities = (self.abundances * self.structure.mean_densities.to('g/cm^3').value)
self.number_densities = self.number_densities.div(self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
axis=0)
else:
logger.critical('atom_data is None, only sensible for testing the parser')
| bsd-3-clause | -5,139,257,636,736,233,000 | -148,591,471,643,015,840 | 36.553789 | 120 | 0.587695 | false |
vitor-alves/pixel-canvas-bot | packages/chardet/langhungarianmodel.py | 269 | 12592 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
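# Editor's note (illustrative, not part of the original chardet source): the
# tuple below maps each raw byte value (0-255) of Latin-2 text to a frequency
# "order"; small orders correspond to the most common Hungarian letters.
# Roughly, the single-byte prober does something like this for each byte c
# (simplified sketch of chardet's actual logic):
#
#     order = Latin2_HungarianCharToOrderMap[c]
#     if order < 64:
#         # a frequent letter: the (previous_order, order) pair is then
#         # scored against HungarianLangModel further below
#         ...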
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
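# Editor's note (illustrative): the flattened table below acts as a 64x64
# bigram matrix; a pair of consecutive character orders is looked up roughly as
#
#     HungarianLangModel[previous_order * 64 + current_order]
#
# where the values 0-3 encode how typical that letter pair is for Hungarian
# (3 = very common, 0 = practically never seen).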
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'char_to_order_map': Latin2_HungarianCharToOrderMap,
'precedence_matrix': HungarianLangModel,
'typical_positive_ratio': 0.947368,
'keep_english_letter': True,
'charset_name': "ISO-8859-2",
'language': 'Hungarian',
}
Win1250HungarianModel = {
'char_to_order_map': win1250HungarianCharToOrderMap,
'precedence_matrix': HungarianLangModel,
'typical_positive_ratio': 0.947368,
'keep_english_letter': True,
'charset_name': "windows-1250",
'language': 'Hungarian',
}
| gpl-3.0 | 4,953,028,019,419,045,000 | -6,900,775,002,566,996,000 | 54.964444 | 70 | 0.554638 | false |
ShownX/incubator-mxnet | example/rcnn/rcnn/io/rpn.py | 34 | 10297 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
RPN:
data =
{'data': [num_images, c, h, w],
'im_info': [num_images, 4] (optional)}
label =
{'gt_boxes': [num_boxes, 5] (optional),
'label': [batch_size, 1] <- [batch_size, num_anchors, feat_height, feat_width],
'bbox_target': [batch_size, num_anchors, feat_height, feat_width],
'bbox_weight': [batch_size, num_anchors, feat_height, feat_width]}
"""
import logging
import numpy as np
import numpy.random as npr
from ..logger import logger
from ..config import config
from .image import get_image, tensor_vstack
from ..processing.generate_anchor import generate_anchors
from ..processing.bbox_transform import bbox_overlaps, bbox_transform
def get_rpn_testbatch(roidb):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped']
:return: data, label, im_info
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
data = {'data': im_array,
'im_info': im_info}
label = {}
return data, label, im_info
def get_rpn_batch(roidb):
"""
prototype for rpn batch: data, im_info, gt_boxes
:param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
:return: data, label
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
# gt boxes: (x1, y1, x2, y2, cls)
if roidb[0]['gt_classes'].size > 0:
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
else:
gt_boxes = np.empty((0, 5), dtype=np.float32)
data = {'data': im_array,
'im_info': im_info}
label = {'gt_boxes': gt_boxes}
return data, label
def assign_anchor(feat_shape, gt_boxes, im_info, feat_stride=16,
scales=(8, 16, 32), ratios=(0.5, 1, 2), allowed_border=0):
"""
assign ground truth boxes to anchor positions
:param feat_shape: infer output shape
:param gt_boxes: assign ground truth
:param im_info: filter out anchors overlapped with edges
:param feat_stride: anchor position step
:param scales: used to generate anchors, affects num_anchors (per location)
:param ratios: aspect ratios of generated anchors
:param allowed_border: filter out anchors with edge overlap > allowed_border
:return: dict of label
'label': of shape (batch_size, 1) <- (batch_size, num_anchors, feat_height, feat_width)
'bbox_target': of shape (batch_size, num_anchors * 4, feat_height, feat_width)
'bbox_inside_weight': *todo* mark the assigned anchors
    'bbox_outside_weight': used to normalize the bbox_loss, all weights sum to RPN_POSITIVE_WEIGHT
"""
def _unmap(data, count, inds, fill=0):
"""" unmap a subset inds of data into original data of size count """
if len(data.shape) == 1:
ret = np.empty((count,), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
im_info = im_info[0]
scales = np.array(scales, dtype=np.float32)
base_anchors = generate_anchors(base_size=feat_stride, ratios=list(ratios), scales=scales)
num_anchors = base_anchors.shape[0]
feat_height, feat_width = feat_shape[-2:]
logger.debug('anchors: %s' % base_anchors)
logger.debug('anchor shapes: %s' % np.hstack((base_anchors[:, 2::4] - base_anchors[:, 0::4],
base_anchors[:, 3::4] - base_anchors[:, 1::4])))
logger.debug('im_info %s' % im_info)
logger.debug('height %d width %d' % (feat_height, feat_width))
logger.debug('gt_boxes shape %s' % np.array(gt_boxes.shape))
logger.debug('gt_boxes %s' % gt_boxes)
# 1. generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, feat_width) * feat_stride
shift_y = np.arange(0, feat_height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = base_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
# only keep anchors inside the image
inds_inside = np.where((all_anchors[:, 0] >= -allowed_border) &
(all_anchors[:, 1] >= -allowed_border) &
(all_anchors[:, 2] < im_info[1] + allowed_border) &
(all_anchors[:, 3] < im_info[0] + allowed_border))[0]
logger.debug('total_anchors %d' % total_anchors)
logger.debug('inds_inside %d' % len(inds_inside))
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
logger.debug('anchors shape %s' % np.array(anchors.shape))
# label: 1 is positive, 0 is negative, -1 is dont care
labels = np.empty((len(inds_inside),), dtype=np.float32)
labels.fill(-1)
if gt_boxes.size > 0:
# overlap between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(anchors.astype(np.float), gt_boxes.astype(np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not config.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IoU
labels[max_overlaps >= config.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if config.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
else:
labels[:] = 0
# subsample positive labels if we have too many
num_fg = int(config.TRAIN.RPN_FG_FRACTION * config.TRAIN.RPN_BATCH_SIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
if logger.level == logging.DEBUG:
disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = config.TRAIN.RPN_BATCH_SIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
if logger.level == logging.DEBUG:
disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
labels[disable_inds] = -1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
if gt_boxes.size > 0:
bbox_targets[:] = bbox_transform(anchors, gt_boxes[argmax_overlaps, :4])
bbox_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_weights[labels == 1, :] = np.array(config.TRAIN.RPN_BBOX_WEIGHTS)
if logger.level == logging.DEBUG:
_sums = bbox_targets[labels == 1, :].sum(axis=0)
_squared_sums = (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
_counts = np.sum(labels == 1)
means = _sums / (_counts + 1e-14)
stds = np.sqrt(_squared_sums / _counts - means ** 2)
logger.debug('means %s' % means)
logger.debug('stdevs %s' % stds)
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_weights = _unmap(bbox_weights, total_anchors, inds_inside, fill=0)
if logger.level == logging.DEBUG:
if gt_boxes.size > 0:
logger.debug('rpn: max max_overlaps %f' % np.max(max_overlaps))
logger.debug('rpn: num_positives %f' % np.sum(labels == 1))
logger.debug('rpn: num_negatives %f' % np.sum(labels == 0))
_fg_sum = np.sum(labels == 1)
_bg_sum = np.sum(labels == 0)
_count = 1
logger.debug('rpn: num_positive avg %f' % (_fg_sum / _count))
logger.debug('rpn: num_negative avg %f' % (_bg_sum / _count))
labels = labels.reshape((1, feat_height, feat_width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, A * feat_height * feat_width))
bbox_targets = bbox_targets.reshape((1, feat_height, feat_width, A * 4)).transpose(0, 3, 1, 2)
bbox_weights = bbox_weights.reshape((1, feat_height, feat_width, A * 4)).transpose((0, 3, 1, 2))
label = {'label': labels,
'bbox_target': bbox_targets,
'bbox_weight': bbox_weights}
return label
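# Illustrative sketch (not part of the original module): how the shifted anchor
# grid inside assign_anchor is formed, shown with a tiny hypothetical 2x3 feature
# map and two made-up base anchors. Relies only on the numpy import above.
def _anchor_grid_demo():
    feat_height, feat_width, feat_stride = 2, 3, 16
    base_anchors = np.array([[-8, -8, 8, 8],
                             [-16, -16, 16, 16]])  # A = 2 (placeholder anchors)
    shift_x = np.arange(0, feat_width) * feat_stride
    shift_y = np.arange(0, feat_height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                        shift_x.ravel(), shift_y.ravel())).transpose()  # K = 6
    A, K = base_anchors.shape[0], shifts.shape[0]
    # (1, A, 4) + (K, 1, 4) broadcasts to (K, A, 4): every base anchor is
    # replicated at every feature map location, then flattened to (K*A, 4).
    all_anchors = (base_anchors.reshape((1, A, 4)) +
                   shifts.reshape((1, K, 4)).transpose((1, 0, 2))).reshape((K * A, 4))
    assert all_anchors.shape == (K * A, 4)  # 12 toy anchors
    return all_anchors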
| apache-2.0 | 7,166,841,047,365,027,000 | -4,135,382,040,475,341,000 | 41.20082 | 104 | 0.619307 | false |
shellderp/sublime-robot-plugin | lib/robot/running/runkwregister.py | 2 | 1734 | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from robot import utils
class _RunKeywordRegister:
def __init__(self):
self._libs = {}
def register_run_keyword(self, libname, keyword, args_to_process=None):
if args_to_process is None:
args_to_process = self._get_args_from_method(keyword)
keyword = keyword.__name__
if libname not in self._libs:
self._libs[libname] = utils.NormalizedDict(ignore=['_'])
self._libs[libname][keyword] = int(args_to_process)
def get_args_to_process(self, libname, kwname):
if libname in self._libs and kwname in self._libs[libname]:
return self._libs[libname][kwname]
return -1
def is_run_keyword(self, libname, kwname):
return self.get_args_to_process(libname, kwname) >= 0
def _get_args_from_method(self, method):
if inspect.ismethod(method):
return method.im_func.func_code.co_argcount -1
elif inspect.isfunction(method):
return method.func_code.co_argcount
raise ValueError("Needs function or method!")
RUN_KW_REGISTER = _RunKeywordRegister()
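# Illustrative sketch (not part of the original module): how a library would
# typically register one of its "run keyword" variants. The library name,
# keyword name and argument count below are examples, not values defined here.
def _example_registration():
    RUN_KW_REGISTER.register_run_keyword('BuiltIn', 'run_keyword_if', 2)
    # Lookups go through utils.NormalizedDict, so case, spaces and underscores
    # are expected to be ignored when matching keyword names.
    assert RUN_KW_REGISTER.is_run_keyword('BuiltIn', 'Run Keyword If')
    assert RUN_KW_REGISTER.get_args_to_process('BuiltIn', 'Run Keyword If') == 2
    # Unregistered keywords report -1 and are not treated as run keywords.
    assert RUN_KW_REGISTER.get_args_to_process('BuiltIn', 'No Such Keyword') == -1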
| apache-2.0 | 7,399,282,971,078,821,000 | 2,260,212,539,402,210,300 | 33.68 | 75 | 0.673587 | false |
PokemonGoF/PokemonGo-Bot-Desktop | build/pywin/Lib/hmac.py | 70 | 4588 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from operator import _compare_digest as compare_digest
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if hasattr(digestmod, '__call__'):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
# Very low blocksize, most likely a legacy value like
# Lib/sha.py and Lib/md5.py have.
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = self.__class__(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
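# Illustrative usage sketch (not part of the original module), exercising the
# new() helper above with the well-known RFC 4231 test case 2 vector for
# HMAC-SHA256 (key "Jefe"); hashlib is assumed to be available.
def _hmac_selftest():
    import hashlib
    expected = "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843"
    one_shot = new("Jefe", "what do ya want for nothing?", hashlib.sha256)
    # Incremental update()s hash the concatenation of all chunks.
    incremental = new("Jefe", digestmod=hashlib.sha256)
    incremental.update("what do ya want ")
    incremental.update("for nothing?")
    assert incremental.hexdigest() == one_shot.hexdigest()
    # Use the constant-time comparison exported above when verifying MACs.
    assert compare_digest(one_shot.hexdigest(), expected)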
| mit | -5,713,062,254,457,251,000 | -2,716,691,547,094,000,000 | 32.735294 | 78 | 0.597428 | false |
h3biomed/ansible | lib/ansible/modules/database/postgresql/postgresql_idx.py | 2 | 15109 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrey Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Create or drop indexes from a PostgreSQL database
description:
- Create or drop indexes from a PostgreSQL database.
- For more information see U(https://www.postgresql.org/docs/current/sql-createindex.html),
U(https://www.postgresql.org/docs/current/sql-dropindex.html).
version_added: '2.8'
options:
idxname:
description:
- Name of the index to create or drop.
type: str
required: true
aliases:
- name
db:
description:
- Name of database to connect to and where the index will be created/dropped.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
schema:
description:
- Name of a database schema where the index will be created.
type: str
state:
description:
- Index state.
- I(state=present) implies the index will be created if it does not exist.
- I(state=absent) implies the index will be dropped if it exists.
type: str
default: present
choices: [ absent, present ]
table:
description:
- Table to create index on it.
- Mutually exclusive with I(state=absent).
type: str
required: true
columns:
description:
- List of index columns that need to be covered by index.
- Mutually exclusive with I(state=absent).
type: list
aliases:
- column
cond:
description:
- Index conditions.
- Mutually exclusive with I(state=absent).
type: str
idxtype:
description:
- Index type (like btree, gist, gin, etc.).
- Mutually exclusive with I(state=absent).
type: str
aliases:
- type
concurrent:
description:
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
- Pay attention, if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
    - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid.
In this case it should be dropped and created again.
- Mutually exclusive with I(cascade=yes).
type: bool
default: yes
tablespace:
description:
- Set a tablespace for the index.
- Mutually exclusive with I(state=absent).
required: false
type: str
storage_params:
description:
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
- Mutually exclusive with I(state=absent).
type: list
cascade:
description:
- Automatically drop objects that depend on the index,
and in turn all objects that depend on those objects U(https://www.postgresql.org/docs/current/sql-dropindex.html).
- It used only with I(state=absent).
- Mutually exclusive with I(concurrent=yes)
type: bool
default: no
notes:
- The index building process can affect database performance.
- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements:
- psycopg2
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
postgresql_idx:
db: acme
table: products
columns: id,name
name: test_idx
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
postgresql_idx:
db: acme
table: products
columns:
- id
- name
idxname: test_idx
tablespace: ssd
storage_params:
- fillfactor=90
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
postgresql_idx:
db: somedb
table: map
idxtype: gist
columns: geo_data
idxname: test_gist_idx
# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
postgresql_idx:
idxname: gin0_idx
table: test
columns: comment gin_trgm_ops
concurrent: no
idxtype: gin
- name: Drop btree test_idx concurrently
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
- name: Drop test_idx cascade
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
cascade: yes
concurrent: no
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
postgresql_idx:
db: mydb
table: test
columns: id,comment
idxname: test_idx
cond: id > 1
'''
RETURN = r'''
name:
description: Index name.
returned: always
type: str
sample: 'foo_idx'
state:
description: Index state.
returned: always
type: str
sample: 'present'
schema:
description: Schema where index exists.
returned: always
type: str
sample: 'public'
tablespace:
description: Tablespace where index exists.
returned: always
type: str
sample: 'ssd'
query:
description: Query that was tried to be executed.
returned: always
type: str
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
description: Index storage parameters.
returned: always
type: list
sample: [ "fillfactor=90" ]
valid:
description: Index validity.
returned: always
type: bool
sample: true
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import connect_to_db, get_conn_params, postgres_common_argument_spec
from ansible.module_utils._text import to_native
VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# ===========================================
# PostgreSQL module specific support methods.
#
class Index(object):
def __init__(self, module, cursor, schema, name):
self.name = name
if schema:
self.schema = schema
else:
self.schema = 'public'
self.module = module
self.cursor = cursor
self.info = {
'name': self.name,
'state': 'absent',
'schema': '',
'tblname': '',
'tblspace': '',
'valid': True,
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_query = ''
def get_info(self):
"""
Getter to refresh and return table info
"""
self.__exists_in_db()
return self.info
def __exists_in_db(self):
"""
Check index and collect info
"""
query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
"pi.indisvalid, c.reloptions "
"FROM pg_catalog.pg_indexes AS i "
"JOIN pg_catalog.pg_class AS c "
"ON i.indexname = c.relname "
"JOIN pg_catalog.pg_index AS pi "
"ON c.oid = pi.indexrelid "
"WHERE i.indexname = '%s'" % self.name)
res = self.__exec_sql(query)
if res:
self.exists = True
self.info = dict(
name=self.name,
state='present',
schema=res[0][0],
tblname=res[0][1],
tblspace=res[0][2] if res[0][2] else '',
valid=res[0][3],
storage_params=res[0][4] if res[0][4] else [],
)
return True
else:
self.exists = False
return False
def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True):
"""
Create PostgreSQL index.
"""
# To change existing index we should write
# 'postgresql_alter_table' standalone module.
if self.exists:
return False
changed = False
if idxtype is None:
idxtype = "BTREE"
query = 'CREATE INDEX'
if concurrent:
query += ' CONCURRENTLY'
query += ' %s' % self.name
if self.schema:
query += ' ON %s.%s ' % (self.schema, tblname)
else:
query += 'public.%s ' % tblname
query += 'USING %s (%s)' % (idxtype, columns)
if storage_params:
query += ' WITH (%s)' % storage_params
if tblspace:
query += ' TABLESPACE %s' % tblspace
if cond:
query += ' WHERE %s' % cond
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def drop(self, schema, cascade=False, concurrent=True):
"""
Drop PostgreSQL index.
"""
changed = False
if not self.exists:
return False
query = 'DROP INDEX'
if concurrent:
query += ' CONCURRENTLY'
if not schema:
query += ' public.%s' % self.name
else:
query += ' %s.%s' % (schema, self.name)
if cascade:
query += ' CASCADE'
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def __exec_sql(self, query, ddl=False):
try:
self.cursor.execute(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
except Exception as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
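# Illustrative note (not part of the original module): for an Index named
# 'test_idx' with table='products', columns='id,name', idxtype='BTREE',
# tablespace='ssd', storage_params='fillfactor=90', cond=None and the default
# concurrent=True, create() above builds roughly:
#   CREATE INDEX CONCURRENTLY test_idx ON public.products USING BTREE (id,name)
#   WITH (fillfactor=90) TABLESPACE ssd
# while drop(schema=None, cascade=True, concurrent=False) builds:
#   DROP INDEX public.test_idx CASCADE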
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
idxname=dict(type='str', required=True, aliases=['name']),
db=dict(type='str', aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present']),
concurrent=dict(type='bool', default=True),
table=dict(type='str'),
idxtype=dict(type='str', aliases=['type']),
columns=dict(type='list', aliases=['column']),
cond=dict(type='str'),
session_role=dict(type='str'),
tablespace=dict(type='str'),
storage_params=dict(type='list'),
cascade=dict(type='bool', default=False),
schema=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
idxname = module.params["idxname"]
state = module.params["state"]
concurrent = module.params["concurrent"]
table = module.params["table"]
idxtype = module.params["idxtype"]
columns = module.params["columns"]
cond = module.params["cond"]
tablespace = module.params["tablespace"]
storage_params = module.params["storage_params"]
cascade = module.params["cascade"]
schema = module.params["schema"]
if concurrent and cascade:
        module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
if state == 'present':
if not table:
module.fail_json(msg="Table must be specified")
if not columns:
module.fail_json(msg="At least one column must be specified")
else:
if table or columns or cond or idxtype or tablespace:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass a table name, columns, conditions, "
"index type, or tablespace" % idxname)
if cascade and state != 'absent':
module.fail_json(msg="cascade parameter used only with state=absent")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Set defaults:
changed = False
# Do job:
index = Index(module, cursor, schema, idxname)
kw = index.get_info()
kw['query'] = ''
#
# check_mode start
if module.check_mode:
if state == 'present' and index.exists:
kw['changed'] = False
module.exit_json(**kw)
elif state == 'present' and not index.exists:
kw['changed'] = True
module.exit_json(**kw)
elif state == 'absent' and not index.exists:
kw['changed'] = False
module.exit_json(**kw)
elif state == 'absent' and index.exists:
kw['changed'] = True
module.exit_json(**kw)
# check_mode end
#
if state == "present":
if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
columns = ','.join(columns)
if storage_params:
storage_params = ','.join(storage_params)
changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent)
if changed:
kw = index.get_info()
kw['state'] = 'present'
kw['query'] = index.executed_query
else:
changed = index.drop(schema, cascade, concurrent)
if changed:
kw['state'] = 'absent'
kw['query'] = index.executed_query
if not kw['valid']:
db_connection.rollback()
module.warn("Index %s is invalid! ROLLBACK" % idxname)
if not concurrent:
db_connection.commit()
kw['changed'] = changed
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,635,899,451,257,119,000 | -6,077,632,875,124,730,000 | 27.779048 | 121 | 0.610894 | false |
Parrot-Developers/bybop | src/Bybop_NetworkAL.py | 2 | 4049 | import socket
import struct
import threading
class DataType:
ACK=1
DATA=2
DATA_LOW_LATENCY=3
DATA_WITH_ACK=4
class NetworkAL(object):
"""
Alternate implementation of the ARNetworkAL protocol, for Wifi devices.
    This implementation is fully compliant with the protocol, and has no major
    limitations.
This implementation uses a thread to do background reads from the socket, and
send data to the application through a listener. This listener must implement a
'data_received' function, which will receive the following arguments:
- type : The type of data received (ack, data, low latency, data with ack)
- buf : The buffer on which this data was retrieved
- seq : The sequence number of the data
- recv_data : The actual data, as a packed string (use the struct module to unpack)
And a 'did_disconnect' function, without arguments, which will be called if the product
does not send any data on the network (probably because we lost the network link, or
because the product has run out of battery)
"""
def __init__(self, ip, c2d_port, d2c_port, listener):
"""
Create and start a new instance of ARNetworkAL.
Arguments:
- ip (string) : The device address
        - c2d_port : The remote reading port
- d2c_port : The local reading port
- listener : A listener which will have its data_received function called
          when data is received from the network.
"""
self._ip = ip
self._c2d_port = int(c2d_port)
self._d2c_port = int(d2c_port)
self._listener = listener
self._alive = False
self._running = False
self._thread = None
self.start()
def stop(self):
"""
Stop the current ARNetworkAL instance.
        Once stopped, an instance can be restarted with the start method.
"""
if self._running:
self._alive = False
self._send_sock.close()
def start(self):
"""
Start the current ARNetworkAL instance.
This function has no effect if the instance is already started.
"""
if self._running:
return
self._alive = True
self._send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._recv_sock.settimeout(5.0)
self._recv_sock.bind(('0.0.0.0', self._d2c_port))
self._thread = threading.Thread(target=self._read_loop)
self._thread.start()
self._running = True
def send_data(self, type, buf, seq, data):
"""
Send the given data to the remote ARNetworkAL.
This function returns a boolean indicating whether the send worked.
        This boolean is not an acknowledgement, just an indicator that the socket
write did not fail.
Arguments:
- type : The type of data (ack, data, low latency, data with ack)
- buf : The target buffer for the data
- seq : The sequence number of the data
        - data : The actual data (usually a string packed with the struct module)
"""
sock_data = struct.pack('<BBBI', type, buf, seq, len(data) + 7)
sock_data += data
try:
self._send_sock.sendto(sock_data, (self._ip, self._c2d_port))
except:
return False
return True
def _read_loop(self):
while self._alive:
try:
sock_data, _ = self._recv_sock.recvfrom(66000)
except Exception as e:
break
the_data = sock_data
while the_data:
(type, buf, seq, size) = struct.unpack('<BBBI', the_data[0:7])
recv_data = the_data[7:size]
self._listener.data_received(type, buf, seq, recv_data)
the_data = the_data[size:]
self._recv_sock.close()
self._listener.did_disconnect()
self._running = False
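# Illustrative sketch (not part of the original file): a minimal listener
# implementing the interface NetworkAL expects (data_received/did_disconnect).
# The IP address, ports and buffer id below are placeholders; real values come
# from the device discovery/connection step.
class _PrintingListener(object):
    def data_received(self, type, buf, seq, recv_data):
        # Called from the background read thread for every received frame.
        print('frame: type=%d buf=%d seq=%d payload=%d bytes'
              % (type, buf, seq, len(recv_data)))
    def did_disconnect(self):
        # Called when nothing is read from the device for 5 seconds.
        print('device appears to be gone')
# Hypothetical wiring:
#   network = NetworkAL('192.168.42.1', 54321, 43210, _PrintingListener())
#   network.send_data(DataType.DATA, 10, 0, struct.pack('<B', 1))
#   network.stop()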
| bsd-3-clause | -8,951,975,459,521,426,000 | -1,581,938,700,757,450,200 | 33.606838 | 91 | 0.602618 | false |
amw2104/fireplace | fireplace/cards/classic/paladin.py | 1 | 2853 | from ..utils import *
##
# Hero Powers
# Reinforce (Uther Lightbringer)
class CS2_101:
activate = Summon(CONTROLLER, "CS2_101t")
# Reinforce (Uther Skin 1)
class CS2_101_H1:
activate = CS2_101.activate
##
# Minions
# Guardian of Kings
class CS2_088:
play = Heal(FRIENDLY_HERO, 6)
# Argent Protector
class EX1_362:
play = GiveDivineShield(TARGET)
# Aldor Peacekeeper
class EX1_382:
play = Buff(TARGET, "EX1_382e")
class EX1_382e:
atk = SET(1)
# Tirion Fordring
class EX1_383:
deathrattle = Summon(CONTROLLER, "EX1_383t")
##
# Spells
# Blessing of Might
class CS2_087:
play = Buff(TARGET, "CS2_087e")
CS2_087e = buff(atk=3)
# Holy Light
class CS2_089:
play = Heal(TARGET, 6)
# Blessing of Kings
class CS2_092:
play = Buff(TARGET, "CS2_092e")
CS2_092e = buff(+4, +4)
# Consecration
class CS2_093:
play = Hit(ENEMY_CHARACTERS, 2)
# Hammer of Wrath
class CS2_094:
play = Hit(TARGET, 3), Draw(CONTROLLER)
# Divine Favor
class EX1_349:
play = DrawUntil(CONTROLLER, Count(ENEMY_HAND))
# Lay on Hands
class EX1_354:
play = Heal(TARGET, 8), Draw(CONTROLLER) * 3
# Blessed Champion
class EX1_355:
play = Buff(TARGET, "EX1_355e")
class EX1_355e:
atk = lambda self, i: i * 2
# Humility
class EX1_360:
play = Buff(TARGET, "EX1_360e")
class EX1_360e:
atk = SET(1)
# Blessing of Wisdom
class EX1_363:
play = Buff(TARGET, "EX1_363e")
class EX1_363e:
events = Attack(OWNER).on(Draw(CONTROLLER))
# Blessing of Wisdom (Unused)
class EX1_363e2:
events = Attack(OWNER).on(Draw(OWNER_OPPONENT))
# Holy Wrath
class EX1_365:
play = Draw(CONTROLLER).then(Hit(TARGET, COST(Draw.CARD)))
# Hand of Protection
class EX1_371:
play = GiveDivineShield(TARGET)
# Avenging Wrath
class EX1_384:
def play(self):
count = self.controller.get_spell_damage(8)
yield Hit(RANDOM_ENEMY_CHARACTER, 1) * count
# Equality
class EX1_619:
play = Buff(ALL_MINIONS, "EX1_619e")
class EX1_619e:
max_health = SET(1)
##
# Secrets
# Noble Sacrifice
class EX1_130:
secret = Attack(ENEMY_MINIONS).on(FULL_BOARD | (
Reveal(SELF), Retarget(Attack.ATTACKER, Summon(CONTROLLER, "EX1_130a"))
))
# Eye for an Eye
class EX1_132:
secret = Damage(FRIENDLY_HERO).on(
Reveal(SELF), Hit(ENEMY_HERO, Damage.AMOUNT)
)
# Redemption
class EX1_136:
secret = Death(FRIENDLY + MINION).on(FULL_BOARD | (
Reveal(SELF),
Summon(CONTROLLER, Copy(Death.ENTITY)).then(SetCurrentHealth(Summon.CARD, 1))
))
# Repentance
class EX1_379:
secret = Play(OPPONENT, MINION | HERO).after(
Reveal(SELF), Buff(Play.CARD, "EX1_379e")
)
class EX1_379e:
max_health = SET(1)
##
# Weapons
# Truesilver Champion
class CS2_097:
events = Attack(FRIENDLY_HERO).on(Heal(FRIENDLY_HERO, 2))
# Sword of Justice
class EX1_366:
events = Summon(CONTROLLER, MINION).after(
Buff(Summon.CARD, "EX1_366e"),
Hit(SELF, 1)
)
EX1_366e = buff(+1, +1)
| agpl-3.0 | -671,716,551,374,201,100 | -4,662,194,528,417,907,000 | 14.256684 | 79 | 0.685594 | false |
renyi533/tensorflow | tensorflow/python/keras/mixed_precision/experimental/policy.py | 1 | 25763 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export
# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'
@keras_export('keras.mixed_precision.experimental.Policy')
class Policy(object):
"""A dtype policy for a Keras layer.
A dtype policy determines dtype-related aspects of a layer, such as its
computation and variable dtypes. Each layer has a policy. Policies can be
passed to the `dtype` argument of layer constructors, or a global policy can
be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
  default to the global policy if no policy is passed to its constructor.
For many models, each layer's policy will have the same compute dtype and
variable dtype, which will typically be float32. In this case, we refer to the
singular dtype as the layer's dtype, which can be queried by the property
`tf.keras.layers.Layer.dtype`.
When mixed precision training is used, most layers will instead have a float16
or bfloat16 compute dtype and a float32 variable dtype, and so the layer does
not have a single dtype. When the variable dtype does not match the compute
dtype, variables will be automatically casted to the compute dtype to avoid
type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the
variable dtype, not the compute dtype. See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on how to use mixed precision.
Certain policies also have a `tf.mixed_precision.experimental.LossScale`
  instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
scaling is a technique used with mixed precision to avoid numerical underflow
in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
`Model.train_on_batch`, and similar methods. Layers which are not Models
ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
determines the compute and variable dtypes. It can be one of the following:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype. No loss scaling is done by default.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. These policies are used for
mixed precision training. With 'mixed_float16', a dynamic loss scale is
used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
scaling is unnecessary with bfloat16.
### How to use mixed precision in a Keras model
To use mixed precision in a Keras model, the `'mixed_float16'` or
`'mixed_bfloat16'` policy can be used.
`tf.keras.mixed_precision.experimental.set_policy` can be used to set the
default policy for layers if no policy is passed to them. For example:
>>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # Dense layers use global policy of 'mixed_float16', which does
... # computations in float16 while keeping variables in float32.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... # Softmax should be done in float32 for numeric stability. We pass
... # dtype='float32' to use float32 instead of the global policy.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Alternatively, the policy can be passed to individual layers instead of
setting the global policy with `set_policy`:
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... tf.keras.layers.Dense(10, dtype=policy),
... tf.keras.layers.Dense(10, dtype=policy),
... # Softmax should be done in float32 for numeric stability.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Note the `'mixed_float16'` policy will apply loss scaling by default in
`Model.fit`, `Model.train_on_batch`, and other training methods. If no such
method is used (e.g., a custom training loop is used) and `'mixed_float16'` is
used, the loss scale must be manually applied. See
`tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
`'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be
manually applied.
See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on using mixed precision
### How to use float64 in a Keras model
Using float64 is similar to mixed precision. Either the global policy can be
set to float64, or `dtype='float64'` can be passed to individual layers. For
example, to set the global policy:
>>> tf.keras.mixed_precision.experimental.set_policy('float64')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # All layers use global policy of 'float64', which does computations
... # and creates variables in float64.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... tf.keras.layers.Activation('softmax')
... ])
  >>> # Optionally set policy back to float32 if any other models use float32
>>> tf.keras.mixed_precision.experimental.set_policy('float32')
### How a layer uses its policy's compute dtype
A layer will cast its inputs to its compute dtype in TensorFlow 2. For
example:
>>> x = tf.ones((4, 4, 4, 4), dtype='float64')
>>> # `layer`'s policy defaults to float32.
>>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  >>> # `layer` casts its inputs to its compute dtype, which is float32, and
>>> # does computations in float32.
>>> y = layer(x)
>>> y.dtype
tf.float32
Note that the base `tf.keras.layers.Layer` class inserts the casts. If
subclassing your own layer, you do not have to insert any casts.
Currently, only tensors in the first argument to the layer's `call` method are
casted. For example:
>>> class MyLayer(tf.keras.layers.Layer):
... # Bug! `b` will not be casted.
... def call(self, a, b):
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer(a, b)
>>> x.dtype
tf.float64
>>> y.dtype
tf.float32
If writing your own layer, it is recommended to accept tensors only in the
first argument. This way, all tensors are casted to the layer's compute dtype.
`MyLayer` should therefore be written as:
>>> class MyLayer(tf.keras.layers.Layer):
... # Now, all tensor inputs will be casted.
... def call(self, inputs):
... a, b = inputs
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer((a, b))
>>> x.dtype
tf.float64
>>> y.dtype
tf.float64
Other arguments are not automatically casted for technical reasons, but this
may change in a future minor release.
A layer subclass can prevent its inputs from being autocasted by passing
`autocast=False` to the layer constructor. For example:
>>> class NonAutoCastingLayer(tf.keras.layers.Layer):
... def __init__(self, **kwargs):
... kwargs['autocast'] = False
... super(NonAutoCastingLayer, self).__init__(**kwargs)
... def call(self, inp):
... return inp
>>> x = tf.ones((4, 4, 4, 4), dtype='float32')
>>> layer = NonAutoCastingLayer(dtype='float64')
>>> y = layer(x) # Will not cast inputs to it's compute dtype of float64
>>> y.dtype
tf.float32
### How a layer uses its policy's variable dtype
The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
is the layer's policy's variable dtype.
If a layer's compute and variable dtypes differ, `add_weight` will wrap
floating-point variables with a special wrapper called an `AutoCastVariable`.
This wrapper is identical to the original variable except it casts itself to
the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`,
the variable is not casted.
A layer author can prevent a variable from being wrapped with an
`AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`:
>>> class MyLayer(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.x = self.add_weight('x')
... self.y = self.add_weight('y', experimental_autocast=False)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyLayer(dtype=policy)
>>> layer.build((2, 2))
>>> layer.x
<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, numpy=...>
>>> layer.y
<tf.Variable 'y:0' shape=() dtype=float32, numpy=...>
Passing `experimental_autocast=False` is useful for layers which may
internally do some math in the variable dtype instead of the compute dtype.
For example, you may wish to compute variable statistics, such as mean and
variance, in the variable dtype.
### How to write a layer that supports mixed precision and float64.
For the most part, layers will automatically support mixed precision and
float64 without any additional work, due to the fact the base layer
automatically casts inputs, creates variables of the correct type, and in the
case of mixed precision, wraps variables with `AutoCastVariables`.
For example, this simple dense layer does not require any additional work to
support mixed precision or float64. Keras automatically casts the inputs and
variable to the appropriate dtype.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... return tf.matmul(inputs, self.kernel)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyDense(dtype=policy)
>>> x = np.random.rand(10, 10)
>>> y = layer(x)
>>> y.dtype
tf.float16
The primary case where you need extra work to support mixed precision or
float64 is when you create a new tensor, such as with `tf.ones` or
`tf.constant`. In such cases, you must create the tensor of the correct dtype.
For example, suppose you modify the `MyDense` layer to add a random number to
the output using `tf.random.normal`. You must pass the input dtype to
`tf.random.normal` to ensure the dtypes match.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
... return tf.matmul(inputs, self.kernel) + rand
>>>
>>> layer = MyDense(dtype=policy)
>>> y = layer(x)
>>> y.dtype
tf.float16
If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError`
would have occurred. This is because the dtype defaults to `"float32"`, so the
layer would only work if the inputs were float32.
### The deprecated "infer" policy
In addition to the above mentioned policies, a policy can also be "infer".
This Policy is deprecated, and it is not recommended. When a layer has an
infer policy, it will infer the computation and variable dtype from the first
input the first time the layer is called. Once the layer is called for the
first time, the layer's policy will change to the dtype of the first input.
In TensorFlow 1, only the "infer" policy is available.
"""
def __init__(self, name, loss_scale=USE_DEFAULT):
"""Constructs the policy.
The `name` argument determines the compute and variable dtype, the default
loss scale, and has no additional effect on the Policy. The compute and
variable dtypes can only be specified through `name`, and cannot be
specified directly.
Args:
name: A string. Can be one of the following values:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. With 'mixed_float16',
a dynamic loss scale is used. These policies are used for mixed
precision training.
* 'infer' (deprecated): Infer the compute and variable dtype from the
input dtype.
loss_scale: A `tf.mixed_precision.experimental.LossScale`, an int (which
uses a `FixedLossScale`), or the string "dynamic" (which uses a
`DynamicLossScale`). Defaults to using no loss scaling unless `name` is
"mixed_float16", in which case this defaults to "dynamic". Only
`tf.keras.Model`s, not layers, use the loss scale, and it is only used
during `Model.fit`, `Model.train_on_batch`, and other similar methods.
"""
if isinstance(name, dtypes.DType):
raise TypeError("'name' must be a string, not a DType. "
"Instead, pass DType.name. Got: %s" % (name.name,))
elif not isinstance(name, six.string_types):
raise TypeError("'name' must be a string, but got: %s" % (name,))
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if loss_scale == USE_DEFAULT:
loss_scale = 'dynamic' if name == 'mixed_float16' else None
self._using_default_loss_scale = True
else:
self._using_default_loss_scale = False
if loss_scale and self._compute_dtype not in (None, 'float16'):
tf_logging.warn('Creating a Policy with a loss scale is only useful for '
'float16 policies. You passed loss_scale=%r for policy '
'%s. Consider not passing any loss_scale instead.' %
(loss_scale, name))
self._loss_scale = keras_loss_scale_module.get(loss_scale)
    if name in ('mixed_float16', 'mixed_bfloat16'):
device_compatibility_check.log_device_compatibility_check(name)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
name: The name of the policy:
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name.endswith('_float32_vars'):
error_msg = ('Policies ending in \'_float32_vars\' have been removed '
'from TensorFlow.')
if name in ('infer_float32_vars', 'infer_with_float32_vars'):
error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
'policy instead.')
elif name == 'float16_with_float32_vars':
error_msg += (' Please use the \'mixed_float16\' policy instead.')
elif name == 'bfloat16_with_float32_vars':
error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
error_msg += ' Got policy name: \'%s\'' % name
raise ValueError(error_msg)
if name == 'mixed_float16':
return 'float16', 'float32'
elif name == 'mixed_bfloat16':
return 'bfloat16', 'float32'
elif name == 'infer':
return None, None
try:
dtype = dtypes.as_dtype(name).name
except TypeError:
error = ("Cannot convert value %s to a mixed precision Policy. "
"Valid policies include include 'mixed_float16', "
"'mixed_bfloat16', and the name of any dtype such as "
"'float32'." % (name,))
# six.raise_from suppresses the original TypeError from being raised
six.raise_from(ValueError(error), None)
return dtype, dtype
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`Policy.compute_dtype`, Layers will cast variables to the compute dtype to
avoid type errors.
Returns:
The variable dtype of this policy, or None if the variable dtype should be
inferred from the inputs.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in.
Note that even if the compute dtype is float16 or bfloat16, hardware devices
may not do individual adds, multiplies, and other fundamental operations in
[b]float16, but instead may do some of them in float32 for numeric
stability. The compute dtype is the dtype of the inputs and outputs of the
TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
do certain internal calculations in float32, or some other device-internal
intermediate format with higher precision than [b]float16, to increase
numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
    float16 compute dtype, will pass float16 inputs to tf.matmul. But tf.matmul
    will use float32 intermediate math. The performance benefit of float16 is
    still apparent, due to increased memory bandwidth and the fact that modern GPUs
have specialized hardware for computing matmuls on float16 while still
keeping intermediate computations in float32.
Returns:
The compute dtype of this policy, or None if the compute dtype should be
inferred from the inputs.
"""
return self._compute_dtype
@property
def should_cast_variables(self):
"""Returns True if variables should be casted.
This is true if the variable dtype is not the same as the compute dtype.
Returns:
True, if variables should be casted.
"""
return self.variable_dtype != self.compute_dtype
@property
def loss_scale(self):
"""Returns the loss scale of this Policy.
Returns:
A `tf.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale)
def get_config(self):
config = {
'name': self.name
}
if not self._using_default_loss_scale:
# We only include the loss scale if the default loss scale is not used.
# This allows us to change the loss scale config format without breaking
# users who use the default loss scale.
config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'loss_scale' in config and isinstance(config['loss_scale'], dict):
config = config.copy()
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'], custom_objects=custom_objects)
return cls(**config)
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.experimental.global_policy')
def global_policy():
"""Returns the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no policy has been set with
`keras.mixed_precision.experimental.set_policy`, this will return a policy
constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults
to float32), or an "infer" policy in TensorFlow 1.
See `keras.mixed_precision.experimental.Policy` for more information.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy('infer')
return _global_policy
def policy_defaults_to_floatx():
"""Returns True if `global_policy()` will use the current value of floatx."""
return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled()
def _check_if_mixed_precision_graph_rewrite_is_enabled():
# TODO(reedwm): Update this comment once the Keras API is complete.
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
raise ValueError(
'The mixed precision policy cannot be set, because the mixed '
'precision graph rewrite has already been enabled.\n'
'At most, one of the following functions can be called:\n\n'
' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
'(You called this first)\n'
' 2. tf.keras.mixed_precision.experimental.set_policy() (You called '
'this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. If in doubt which function to use, '
'use the second, as it supports Eager execution and is more '
'customizable.')
@keras_export('keras.mixed_precision.experimental.set_policy')
def set_policy(policy):
"""Sets the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no global policy is set, layers will
instead default to a Policy constructed from `tf.keras.backend.floatx()` in
TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy.
See `keras.mixed_precision.experimental.Policy` for more information.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
"""
global _global_policy
_check_if_mixed_precision_graph_rewrite_is_enabled()
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and
policy.compute_dtype):
raise ValueError(
'The global policy can only be set to a non-infer policy in TensorFlow '
'2')
_global_policy = policy
mixed_precision_global_state.using_default_mixed_precision_policy = (
_global_policy is None)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
"""
old_policy = _global_policy
try:
set_policy(policy)
yield
finally:
set_policy(old_policy)
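# Editorial usage sketch (not part of the original module): `policy_scope` is
# meant to be used as a context manager so the previous global policy is
# restored on exit, e.g.:
#
#   with policy_scope('mixed_float16'):
#     layer = tf.keras.layers.Dense(10)  # built while mixed_float16 is global
#   # afterwards, the prior global policy is active again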
def _is_convertible_to_dtype(dtype):
try:
dtypes.as_dtype(dtype)
return True
except TypeError:
return False
def _policy_equivalent_to_dtype(policy):
"""Returns True if the Policy is equivalent to a single dtype.
A policy is equivalent to a single dtype if the policy's compute and variable
dtypes are the same and the policy does not cause the layer/model to have
additional behavior, such as loss scaling.
The "infer" policy is considered equivalent to a single dtype.
Args:
policy: A Policy.
Returns:
True, if the policy is equivalent to a single dtype.
"""
  # We use type() instead of isinstance because a subclass of Policy is never
# equivalent to a dtype.
return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck
list(policy.get_config().keys()) == ['name'] and
(policy.name == 'infer' or _is_convertible_to_dtype(policy.name)))
def serialize(policy):
if _policy_equivalent_to_dtype(policy):
# We return either None or the policy name for compatibility with older
# versions of Keras. If the policy name is returned, it is a dtype string
# such as 'float32'.
return None if policy.name == 'infer' else policy.name
return generic_utils.serialize_keras_object(policy)
def deserialize(config, custom_objects=None):
if isinstance(config, str) and _is_convertible_to_dtype(config):
return Policy(config)
if config is None:
return Policy('infer')
module_objects = {'Policy': Policy}
return generic_utils.deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='dtype policy')
| apache-2.0 | 4,941,920,061,406,158,000 | 1,541,051,865,001,965,800 | 39.958665 | 102 | 0.695843 | false |
PrismTech/opensplice | build/docs/DDSTutorial/source/conf.py | 2 | 8804 | # -*- coding: utf-8 -*-
#
# Vortex OpenSplice Tutorial build configuration file, created by
# ReST Editor on 24-Mar-2015
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# import liteconfig
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.todo']
#extensions = ['sphinx.ext.todo', 'numfig']
extensions = ['sphinx.ext.todo', 'sphinx.ext.ifconfig']
def setup(app):
app.add_config_value('rmi_languages', '', True)
#rmi_languages = 'C++ and Java'
rmi_languages = 'C++'
#rmi_languages = 'Java'
rst_prolog = """
.. |rmi_langs| replace:: C++
.. |product_name| replace:: OpenSplice
"""
#.. |rmi_langs| replace:: C++ and Java
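# Editorial example (not part of the original config): with the substitutions
# above and the 'rmi_languages' value registered in setup(), an .rst page in
# this manual could contain, for instance:
#
#   The |product_name| RMI API is available for |rmi_langs|.
#
#   .. ifconfig:: rmi_languages == 'C++'
#
#      This paragraph is only rendered when rmi_languages is 'C++'.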
# Add any paths that contain templates here, relative to this directory.
templates_path = [u'_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = u'utf-8-sig'
# The master toctree document.
master_doc = u'index'
# General information about the project.
project = u'The Data Distribution Service Tutorial'
this_year = time.strftime( '%Y' )
copyright = u'{y}, ADLINK Technology Limited'.format( y = this_year )
print 'Copyright string is:', copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = u's'
#version = liteconfig.version
#version = u'6.x'
# The full version, including alpha/beta/rc tags.
#release = u's'
#release = version
#release = u'.0'
#print 'Short version string is:', version
#print 'Full version string is:', release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = u'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# Force blank date with today = ' ' (space, not empty string)
today = ' '
# ***************
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = u'sphinxdoc'
html_theme = u'vortextheme'
html_theme_path = ['../../.']
#build theme directory in lite using environment variable, so shared amongst books
# insight team can delete,
#html_theme_path = [os.environ['VL_HOME'] + '/build/docs']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
html_title = 'The Data Distribution Service Tutorial'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
html_short_title = 'DDS Tutorial'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = './images/Vortex_logo_2014.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
html_static_path = [u'_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'The Data Distribution Service Tutorial'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = u'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = u'10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'OpenSplice_DDSTutorial.tex', u'The DDS Tutorial', u'', 'manual', True)]
# Note 'author' field empty
# Added 'True' to end of generated line to suppress 'Index & Tables'
# A dictionary that contains LaTeX snippets that override those Sphinx usually
# puts into the generated .tex files.
latex_elements = { 'babel': '\\usepackage[english]{babel}' }
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_logo = 'images/Vortex-Cover.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# THIS GETS RID OF BLANK PAGES AT ENDS OF CHAPTERS & ToC
latex_elements = {
'classoptions': ',openany, oneside',
'babel': '\\usepackage[english]{babel}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'DDS_Tutorial', u'DDS_Tutorial Documentation', [u'ADLINK Technology Limited'], 1)]
# -- Additional options --------------------------------------------------------
todo_include_todos = True
| gpl-3.0 | -3,900,707,443,852,006,000 | -4,053,769,422,396,271,600 | 31.607407 | 105 | 0.702067 | false |
googleapis/googleapis-gen | google/cloud/networkmanagement/v1/networkmanagement-v1-py/google/cloud/network_management_v1/services/reachability_service/transports/grpc.py | 1 | 21150 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.network_management_v1.types import connectivity_test
from google.cloud.network_management_v1.types import reachability
from google.longrunning import operations_pb2 # type: ignore
from .base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO
class ReachabilityServiceGrpcTransport(ReachabilityServiceTransport):
"""gRPC backend transport for ReachabilityService.
The Reachability service in the Google Cloud Network
Management API provides services that analyze the reachability
within a single Google Virtual Private Cloud (VPC) network,
between peered VPC networks, between VPC and on-premises
networks, or between VPC networks and internet hosts. A
reachability analysis is based on Google Cloud network
configurations.
You can use the analysis results to verify these configurations
and to troubleshoot connectivity issues.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
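    Editorial sketch (not from the generated docs; the transport is normally
    created for you by the client): explicit construction might look roughly
    like::
        channel = ReachabilityServiceGrpcTransport.create_channel(
            'networkmanagement.googleapis.com')
        transport = ReachabilityServiceGrpcTransport(channel=channel)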
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_connectivity_tests(self) -> Callable[
[reachability.ListConnectivityTestsRequest],
reachability.ListConnectivityTestsResponse]:
r"""Return a callable for the list connectivity tests method over gRPC.
Lists all Connectivity Tests owned by a project.
Returns:
Callable[[~.ListConnectivityTestsRequest],
~.ListConnectivityTestsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connectivity_tests' not in self._stubs:
self._stubs['list_connectivity_tests'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/ListConnectivityTests',
request_serializer=reachability.ListConnectivityTestsRequest.serialize,
response_deserializer=reachability.ListConnectivityTestsResponse.deserialize,
)
return self._stubs['list_connectivity_tests']
@property
def get_connectivity_test(self) -> Callable[
[reachability.GetConnectivityTestRequest],
connectivity_test.ConnectivityTest]:
r"""Return a callable for the get connectivity test method over gRPC.
Gets the details of a specific Connectivity Test.
Returns:
Callable[[~.GetConnectivityTestRequest],
~.ConnectivityTest]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connectivity_test' not in self._stubs:
self._stubs['get_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/GetConnectivityTest',
request_serializer=reachability.GetConnectivityTestRequest.serialize,
response_deserializer=connectivity_test.ConnectivityTest.deserialize,
)
return self._stubs['get_connectivity_test']
@property
def create_connectivity_test(self) -> Callable[
[reachability.CreateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the create connectivity test method over gRPC.
Creates a new Connectivity Test. After you create a test, the
reachability analysis is performed as part of the long running
operation, which completes when the analysis completes.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, containing non-existent resources in the
network, or you don't have read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
AMBIGUOUS. For more information, see the Connectivity Test
documentation.
Returns:
Callable[[~.CreateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connectivity_test' not in self._stubs:
self._stubs['create_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/CreateConnectivityTest',
request_serializer=reachability.CreateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_connectivity_test']
@property
def update_connectivity_test(self) -> Callable[
[reachability.UpdateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the update connectivity test method over gRPC.
Updates the configuration of an existing ``ConnectivityTest``.
After you update a test, the reachability analysis is performed
as part of the long running operation, which completes when the
analysis completes. The Reachability state in the test resource
is updated with the new result.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, they contain non-existent resources in the
network, or the user does not have read permissions to the
network configurations of listed projects), then the
reachability result returns a value of UNKNOWN.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
        ``AMBIGUOUS``. See the documentation in ``ConnectivityTest`` for
        more details.
Returns:
Callable[[~.UpdateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connectivity_test' not in self._stubs:
self._stubs['update_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/UpdateConnectivityTest',
request_serializer=reachability.UpdateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_connectivity_test']
@property
def rerun_connectivity_test(self) -> Callable[
[reachability.RerunConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the rerun connectivity test method over gRPC.
Rerun an existing ``ConnectivityTest``. After the user triggers
the rerun, the reachability analysis is performed as part of the
long running operation, which completes when the analysis
completes.
Even though the test configuration remains the same, the
reachability result may change due to underlying network
configuration changes.
If the endpoint specifications in ``ConnectivityTest`` become
invalid (for example, specified resources are deleted in the
network, or you lost read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
Returns:
Callable[[~.RerunConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'rerun_connectivity_test' not in self._stubs:
self._stubs['rerun_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/RerunConnectivityTest',
request_serializer=reachability.RerunConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['rerun_connectivity_test']
@property
def delete_connectivity_test(self) -> Callable[
[reachability.DeleteConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete connectivity test method over gRPC.
Deletes a specific ``ConnectivityTest``.
Returns:
Callable[[~.DeleteConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connectivity_test' not in self._stubs:
self._stubs['delete_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/DeleteConnectivityTest',
request_serializer=reachability.DeleteConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_connectivity_test']
__all__ = (
'ReachabilityServiceGrpcTransport',
)
| apache-2.0 | 9,098,083,035,357,102,000 | -4,317,687,385,841,342,000 | 45.792035 | 96 | 0.636359 | false |
VlachosGroup/VlachosGroupAdditivity | pgradd/DrawMol.py | 1 | 2230 | """
============================================================
Definition to draw RDKit mol object (:mod:`pgradd.DrawMol`)
============================================================
Converts an rdkit mol object to an SVG image and displays it.
"""
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from IPython.display import SVG, display
# http://rdkit.blogspot.com/2015/02/new-drawing-code.html
def moltosvg(mol, highlight=[], molSize=(400, 400), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except Exception:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
# Atom Label
opts = drawer.drawOptions()
# Atom name and index
for i in range(mol.GetNumAtoms()):
opts.atomLabels[i] = mol.GetAtomWithIdx(i).GetSymbol()+str(i)
# radicals and charges
for atom in mol.GetAtoms():
nr = atom.GetNumRadicalElectrons()
nc = atom.GetFormalCharge()
if nr > 0:
string = atom.GetSymbol() + ':'*divmod(nr, 2)[0] +\
'.'*divmod(nr, 2)[1]
opts.atomLabels[atom.GetIdx()] += string
elif nc == 1:
string = atom.GetSymbol() + '+'
opts.atomLabels[atom.GetIdx()] += string
elif nc > 1:
string = atom.GetSymbol() + '+' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
elif nc == -1:
string = atom.GetSymbol() + '-'
opts.atomLabels[atom.GetIdx()] += string
elif nc < -1:
string = atom.GetSymbol() + '-' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
# highlight
if highlight:
drawer.DrawMolecule(mc, highlightAtoms=highlight)
else:
drawer.DrawMolecule(mc)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
    svg = svg.replace('svg:', '')
display(SVG(svg))
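# Editorial usage sketch (hypothetical, not part of the original module):
#
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('CCO')
#   moltosvg(mol, highlight=[0, 1])  # renders the annotated SVG in a notebook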
| mit | 7,193,969,965,303,007,000 | -1,437,300,390,542,569,500 | 32.283582 | 75 | 0.58296 | false |
aldebaran/qibuild | python/qitest/parsers.py | 1 | 7334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Collection of parser fonctions for qitests actions """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import qisys.parsers
import qitest.project
import qibuild.parsers
class EmptyTestListException(Exception):
""" No test to run exception """
pass
def test_parser(parser, with_num_jobs=True):
""" Test Parser """
qisys.parsers.worktree_parser(parser)
group = parser.add_argument_group("test options")
group.add_argument("--perf", dest="perf", action="store_true",
help="run perfs tests instead of pure tests.")
group.add_argument("-k", "--pattern", dest="patterns", action="append",
help="Filter tests matching these patterns")
group.add_argument("-x", "--exclude", dest="excludes", action="append",
help="Exclude test matching these patterns")
group.add_argument("-V", dest="verbose_tests", action="store_true",
help="display tests output")
group.add_argument("--valgrind", dest="valgrind", action="store_true",
help="run tests under valgrind")
group.add_argument("--nightmare", dest="nightmare", action="store_true",
help="run tests in shuffle and 20 times (apply only to gtest)")
group.add_argument("--coverage", dest="coverage", action="store_true",
help="run coverage")
group.add_argument("--ncpu", dest="num_cpus", default=-1, type=int,
help="set number of CPU each test is allowed to use (linux)")
group.add_argument("--nightly", action="store_true", dest="nightly")
group.add_argument("--break-on-failure", action="store_true", dest="break_on_failure",
help="Break on failure (for gtest only)")
group.add_argument("--repeat-until-fail", default=0, type=int, metavar="N",
help="Repeat tests until they fail (at most N times)")
group.add_argument("--qitest-json", dest="qitest_jsons", action="append")
group.add_argument("--test-output-dir", type=os.path.abspath,
dest="test_output_dir",
help="Generate XML test reports in the given directory "
"(instead of build-<platform>/sdk/test-results)")
group.add_argument("--coverage-output-dir", dest="coverage_output_dir",
help="Generate XML and HTML coverage reports in the given "
"directory (instead of build-<platform>/sdk/coverage-results)")
group.add_argument("--root-output-dir", dest="test_output_dir", metavar="ROOT_OUTPUT_DIR",
help="same as --test-output-dir (deprecated)")
group.add_argument("--no-capture", dest="capture", action="store_false")
group.add_argument("--ignore-timeouts", dest="ignore_timeouts", action="store_true",
help="Ignore timeouts when running tests")
group.add_argument("--lf", "--last-failed", dest="last_failed", action="store_true",
help="Run the failing test from previous run")
group.add_argument("--allow-no-test", dest="allow_no_test", action="store_true",
help="Don't fail if no tests to run")
parser.set_defaults(nightly=False, capture=True, last_failed=False,
ignore_timeouts=False)
if with_num_jobs:
qisys.parsers.parallel_parser(group, default=1)
return group
def get_test_runner(args, build_project=None, qitest_json=None):
""" Get Test Runner """
test_project = None
if not qitest_json:
qitest_json = vars(args).get("qitest_json")
if not qitest_json:
candidate = os.path.join(os.getcwd(), "qitest.json")
if os.path.exists(candidate):
qitest_json = candidate
if qitest_json:
test_project = qitest.project.TestProject(qitest_json)
if not test_project:
if build_project:
test_project = build_project.to_test_project()
else:
return None
test_runner = qibuild.test_runner.ProjectTestRunner(test_project)
if build_project:
test_runner.cwd = build_project.sdk_directory
test_runner.env = build_project.build_worktree.get_env()
else:
test_runner.cwd = qisys.sh.to_native_path(os.path.dirname(qitest_json))
test_runner.patterns = args.patterns
test_runner.excludes = args.excludes
test_runner.perf = args.perf
test_runner.coverage = args.coverage
test_runner.break_on_failure = args.break_on_failure
test_runner.valgrind = args.valgrind
test_runner.verbose = args.verbose_tests
test_runner.num_cpus = args.num_cpus
test_runner.num_jobs = args.num_jobs
test_runner.repeat_until_fail = args.repeat_until_fail
test_runner.nightly = args.nightly
test_runner.nightmare = args.nightmare
test_runner.test_output_dir = args.test_output_dir
test_runner.capture = args.capture
test_runner.last_failed = args.last_failed
test_runner.ignore_timeouts = args.ignore_timeouts
return test_runner
def parse_build_projects(args):
""" Parse Build Projects """
res = list()
try:
build_worktree = qibuild.parsers.get_build_worktree(args)
solve_deps = False
if args.use_deps:
solve_deps = True
build_projects = qibuild.parsers.get_build_projects(
build_worktree,
args, solve_deps=solve_deps)
for build_project in build_projects:
test_runner = None
try:
test_runner = get_test_runner(args, build_project=build_project)
except qibuild.project.NoQiTestJson:
pass
if test_runner:
res.append(test_runner)
except (qisys.worktree.NotInWorkTree, qibuild.parsers.CouldNotGuessProjectName):
pass
return res
def get_test_runners(args):
""" Get Test Runners """
res = list()
qitest_jsons = args.qitest_jsons or list()
# first case: qitest.json in current working directory
test_runner = get_test_runner(args)
if test_runner:
res.append(test_runner)
# second case: qitest.json specified with --qitest-json
for qitest_json in qitest_jsons:
test_runner = get_test_runner(args, qitest_json=qitest_json)
res.append(test_runner)
# third case: parsing build projects
build_projects_runners = parse_build_projects(args)
# avoid appending a test_runner guessed from a build project
# when res already contains a test runner computed from a
# --qitest-json argument
known_cwds = [x.cwd for x in res]
for test_runner in build_projects_runners:
if test_runner.cwd not in known_cwds:
res.append(test_runner)
if args.coverage and not build_projects_runners:
raise Exception("""--coverage can only be used from a qibuild CMake project\n""")
elif args.coverage:
return build_projects_runners
if not res:
raise EmptyTestListException("Nothing found to test")
return res
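# Editorial sketch (hypothetical wiring, not part of qibuild itself): a qitest
# action typically builds an argparse parser, applies test_parser(), and then
# asks for the runners, roughly:
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   test_parser(parser)
#   args = parser.parse_args(["--qitest-json", "path/to/qitest.json"])
#   runners = get_test_runners(args)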
| bsd-3-clause | -5,038,579,766,160,057,000 | 6,067,308,602,233,706,000 | 43.993865 | 94 | 0.637715 | false |
pekeler/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_pep352.py | 51 | 9655 | import unittest
import __builtin__
import exceptions
import warnings
from test.test_support import run_unittest
import os
from platform import system as platform_system
def ignore_message_warning():
"""Ignore the DeprecationWarning for BaseException.message."""
warnings.resetwarnings()
warnings.filterwarnings("ignore", "BaseException.message",
DeprecationWarning)
class ExceptionClassTests(unittest.TestCase):
"""Tests for anything relating to exception objects themselves (e.g.,
inheritance hierarchy)"""
def test_builtins_new_style(self):
self.failUnless(issubclass(Exception, object))
def verify_instance_interface(self, ins):
with warnings.catch_warnings():
ignore_message_warning()
for attr in ("args", "message", "__str__", "__repr__",
"__getitem__"):
self.failUnless(hasattr(ins, attr),
"%s missing %s attribute" %
(ins.__class__.__name__, attr))
def test_inheritance(self):
# Make sure the inheritance hierarchy matches the documentation
exc_set = set(x for x in dir(exceptions) if not x.startswith('_'))
inheritance_tree = open(os.path.join(os.path.split(__file__)[0],
'exception_hierarchy.txt'))
try:
superclass_name = inheritance_tree.readline().rstrip()
try:
last_exc = getattr(__builtin__, superclass_name)
except AttributeError:
self.fail("base class %s not a built-in" % superclass_name)
self.failUnless(superclass_name in exc_set)
exc_set.discard(superclass_name)
superclasses = [] # Loop will insert base exception
last_depth = 0
for exc_line in inheritance_tree:
exc_line = exc_line.rstrip()
depth = exc_line.rindex('-')
exc_name = exc_line[depth+2:] # Slice past space
if '(' in exc_name:
paren_index = exc_name.index('(')
platform_name = exc_name[paren_index+1:-1]
exc_name = exc_name[:paren_index-1] # Slice off space
if platform_system() != platform_name:
exc_set.discard(exc_name)
continue
if '[' in exc_name:
left_bracket = exc_name.index('[')
exc_name = exc_name[:left_bracket-1] # cover space
try:
exc = getattr(__builtin__, exc_name)
except AttributeError:
self.fail("%s not a built-in exception" % exc_name)
if last_depth < depth:
superclasses.append((last_depth, last_exc))
elif last_depth > depth:
while superclasses[-1][0] >= depth:
superclasses.pop()
self.failUnless(issubclass(exc, superclasses[-1][1]),
"%s is not a subclass of %s" % (exc.__name__,
superclasses[-1][1].__name__))
try: # Some exceptions require arguments; just skip them
self.verify_instance_interface(exc())
except TypeError:
pass
self.failUnless(exc_name in exc_set)
exc_set.discard(exc_name)
last_exc = exc
last_depth = depth
finally:
inheritance_tree.close()
self.failUnlessEqual(len(exc_set), 0, "%s not accounted for" % exc_set)
interface_tests = ("length", "args", "message", "str", "unicode", "repr",
"indexing")
def interface_test_driver(self, results):
for test_name, (given, expected) in zip(self.interface_tests, results):
self.failUnlessEqual(given, expected, "%s: %s != %s" % (test_name,
given, expected))
def test_interface_single_arg(self):
# Make sure interface works properly when given a single argument
arg = "spam"
exc = Exception(arg)
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), 1], [exc.args[0], arg],
[exc.message, arg],
[str(exc), str(arg)], [unicode(exc), unicode(arg)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)], [exc[0],
arg])
self.interface_test_driver(results)
def test_interface_multi_arg(self):
# Make sure interface correct when multiple arguments given
arg_count = 3
args = tuple(range(arg_count))
exc = Exception(*args)
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), arg_count], [exc.args, args],
[exc.message, ''], [str(exc), str(args)],
[unicode(exc), unicode(args)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)],
[exc[-1], args[-1]])
self.interface_test_driver(results)
def test_interface_no_arg(self):
# Make sure that with no args that interface is correct
exc = Exception()
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), 0], [exc.args, tuple()],
[exc.message, ''],
[str(exc), ''], [unicode(exc), u''],
[repr(exc), exc.__class__.__name__ + '()'], [True, True])
self.interface_test_driver(results)
def test_message_deprecation(self):
# As of Python 2.6, BaseException.message is deprecated.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings('error')
try:
BaseException().message
except DeprecationWarning:
pass
else:
self.fail("BaseException.message not deprecated")
exc = BaseException()
try:
exc.message = ''
except DeprecationWarning:
pass
else:
self.fail("BaseException.message assignment not deprecated")
class UsageTests(unittest.TestCase):
"""Test usage of exceptions"""
def raise_fails(self, object_):
"""Make sure that raising 'object_' triggers a TypeError."""
try:
raise object_
except TypeError:
return # What is expected.
self.fail("TypeError expected for raising %s" % type(object_))
def catch_fails(self, object_):
"""Catching 'object_' should raise a TypeError."""
try:
try:
raise StandardError
except object_:
pass
except TypeError:
pass
except StandardError:
self.fail("TypeError expected when catching %s" % type(object_))
try:
try:
raise StandardError
except (object_,):
pass
except TypeError:
return
except StandardError:
self.fail("TypeError expected when catching %s as specified in a "
"tuple" % type(object_))
def test_raise_classic(self):
# Raising a classic class is okay (for now).
class ClassicClass:
pass
try:
raise ClassicClass
except ClassicClass:
pass
except:
self.fail("unable to raise classic class")
try:
raise ClassicClass()
except ClassicClass:
pass
except:
self.fail("unable to raise class class instance")
def test_raise_new_style_non_exception(self):
# You cannot raise a new-style class that does not inherit from
# BaseException; the ability was not possible until BaseException's
# introduction so no need to support new-style objects that do not
# inherit from it.
class NewStyleClass(object):
pass
self.raise_fails(NewStyleClass)
self.raise_fails(NewStyleClass())
def test_raise_string(self):
# Raising a string raises TypeError.
self.raise_fails("spam")
def test_catch_string(self):
# Catching a string should trigger a DeprecationWarning.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings("error")
str_exc = "spam"
try:
try:
raise StandardError
except str_exc:
pass
except DeprecationWarning:
pass
except StandardError:
self.fail("catching a string exception did not raise "
"DeprecationWarning")
# Make sure that even if the string exception is listed in a tuple
# that a warning is raised.
try:
try:
raise StandardError
except (AssertionError, str_exc):
pass
except DeprecationWarning:
pass
except StandardError:
self.fail("catching a string exception specified in a tuple did "
"not raise DeprecationWarning")
def test_main():
run_unittest(ExceptionClassTests, UsageTests)
if __name__ == '__main__':
test_main()
| apache-2.0 | 3,968,800,333,551,173,600 | 8,684,531,583,290,684,000 | 36.714844 | 81 | 0.529674 | false |
ahmed-mahran/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Part.py | 37 | 19486 | #._cv_part guppy.heapy.Part
class Format(object):
__slots__ = 'impl', 'mod'
def __init__(self, impl):
self.impl = impl
self.mod = impl.mod
def get_formatted_row(self, row):
fr = self.get_stat_data(row)
rows = []
rs = row.name.split('\n')
subsequent_indent = len(fr)*' '
rows.extend(self.mod.wrap(
fr+rs[0],
width=self.mod.line_length,
subsequent_indent=subsequent_indent))
for r in rs[1:]:
rows.extend(self.mod.wrap(
r,
width=self.mod.line_length,
initial_indent=subsequent_indent,
subsequent_indent=subsequent_indent))
return '\n'.join(rows)
def get_more_index(self, idx=None):
if idx is None:
idx = 0
idx += 10
return idx
def get_row_header(self):
impl = self.impl
if not (impl.count or impl.size):
return ''
sh = self.get_stat_header()
return self.mod.fill(
sh + self.impl.kindheader,
width=self.mod.line_length,
subsequent_indent=' '*len(sh))
def load_statrow_csk(self, r):
impl = self.impl
count, size, kind = r.split(' ', 2)
count = int(count)
size = int(size)
impl.cum_size += size
return StatRow(count, size, kind, impl.cur_index, impl.cum_size)
def load_statrow_sk(self, r):
impl = self.impl
size, kind = r.split(' ', 1)
size = int(size)
impl.cum_size += size
return StatRow(1, size, kind, impl.cur_index, impl.cum_size)
def ppob(self, ob, idx=None):
impl = self.impl
if idx is None:
label = self.get_label()
if label is not None:
print >>ob, label
idx = 0
if idx < 0:
            idx = impl.numrows + idx
it = impl.get_rows(idx)
print >>ob, self.get_row_header()
numrows = 0
for row in it:
form = self.get_formatted_row(row)
print >>ob, form
numrows += 1
if numrows >= 10:
nummore = impl.numrows - 1 - row.index
if nummore > 1:
print >>ob, \
"<%d more rows. Type e.g. '_.more' to view.>"%nummore
break
class SetFormat(Format):
__slots__ = ()
def get_label(self):
impl = self.impl
if impl.count != 1:
s = 's'
else:
s = ''
return 'Partition of a set of %d object%s. Total size = %d bytes.'%(
impl.count, s, impl.size)
def get_rowdata(self, row):
return '%d %d %s'%(row.count, row.size, row.name)
def get_stat_header(self):
return (
' Index Count % Size % Cumulative % ')
def get_stat_data(self, row):
format = '%6d %6d %3d %8d %3d %9d %3d '
impl = self.impl
fr = format % (
row.index,
row.count, int('%.0f'%(row.count * 100.0/impl.count)),
row.size, int('%.0f'%(row.size * 100.0/impl.size)),
row.cumulsize, int('%.0f'%(row.cumulsize * 100.0/impl.size)),
)
return fr
def load_statrow(self, r):
return self.load_statrow_csk(r)
class IdFormat(Format):
__slots__ = ()
def get_label(self):
impl = self.impl
if impl.count != 1:
s = 's'
else:
s = ''
return (
'Set of %d %s object%s. Total size = %d bytes.'%(
impl.count, impl.kindname, s, impl.size))
def get_rowdata(self, row):
return '%d %s'%(row.size, row.name)
def get_stat_header(self):
return (
' Index Size % Cumulative % ')
def get_stat_data(self, row):
impl = self.impl
format = '%6d %8d %5.1f %9d %5.1f '
fr = format % (
row.index,
row.size, (row.size * 100.0/impl.size),
row.cumulsize, row.cumulsize * 100.0/impl.size,
)
return fr
def load_statrow(self, r):
return self.load_statrow_sk(r)
class DiffFormat(Format):
__slots__ = ()
def _percent_of_b(self, size):
if self.impl.b_size != 0:
return '%9.3g'%(size*100.0/self.impl.b_size,)
else:
return ' (n.a.)'
def get_label(self):
impl = self.impl
x = (
'Summary of difference operation (A-B).\n'+
' Count Size\n'+
' A %6d %8d\n'%(impl.count+impl.b_count, impl.size+impl.b_size)+
' B %6d %8d\n'%(impl.b_count, impl.b_size)+
' A-B %6d %8d = %s %% of B\n'%(impl.count, impl.size, self._percent_of_b(impl.size)))
if impl.count or impl.size:
x += '\nDifferences by kind, largest absolute size diffs first.'
return x
def get_rowdata(self, row):
return '%d %d %s'%(row.count, row.size, row.name)
def get_stat_header(self):
return (
' Index Count Size Cumulative % of B ')
def get_stat_data(self, row):
impl = self.impl
format = '%6d %6d %8d %9d %s '
fr = format % (
row.index,
row.count,
row.size,
row.cumulsize,
self._percent_of_b(row.cumulsize),
)
return fr
def load_statrow(self, r):
return self.load_statrow_csk(r)
class StatRow(object):
__slots__ = 'count', 'size', 'name', 'index', 'cumulsize'
def __init__(self, count, size, name, index=None, cumulsize=None):
self.count = count
self.size = size
self.name = name
self.index = index
self.cumulsize = cumulsize
class PartRow(StatRow):
__slots__ = 'set', 'kind'
def __init__(self, count, size, name, index, cumulsize, set, kind):
self.count = count
self.size = size
self.name = name
self.index = index
self.cumulsize = cumulsize
self.set = set
self.kind = kind
class Stat:
def __init__(self, mod, get_trows, firstheader=''):
self.mod = mod
self._hiding_tag_ = mod._hiding_tag_
self.get_trows = get_trows
self.firstheader = firstheader
self.it = iter(get_trows())
self.cur_index = 0
self.cum_size = 0
self.rows = []
r = self.get_next()
while r and not r.startswith('.r:'):
name = r[1:r.index(':')]
value = r[r.index(':')+1:].strip()
try:
value = int(value)
except ValueError:
pass
setattr(self, name, value)
r = self.get_next()
self.format_name = self.format
self.format_class = getattr(self.mod, self.format)
self.format = self.format_class(self)
self.timemade = float(self.timemade)
def __getitem__(self, idx):
if isinstance(idx, (int, long)):
if idx < 0:
idx = self.numrows + idx
if not (0 <= idx < self.numrows):
raise IndexError, 'Stat index out of range.'
rows = [self.get_row(idx)]
elif isinstance(idx, slice):
start, stop, step = idx.indices(self.numrows)
rows = [self.get_row(idx) for idx in range(start, stop, step)]
else:
raise IndexError, 'Stat indices must be integers or slices.'
count = 0
size = 0
for r in rows:
count += r.count
size += r.size
trows = [
'.loader: _load_stat',
'.format: %s'%self.format_name,
'.timemade: %f'%self.timemade,
'.count: %d'%count,
'.size: %d'%size,
'.kindheader: %s'%self.kindheader,
'.kindname: %s'%self.kindname,
'.numrows: %d'%len(rows),
]
if getattr(self, 'b_count', None) is not None:
trows.append('.b_count: %d'%self.b_count)
trows.append('.b_size: %d'%self.b_size)
for r in rows:
trows.append('.r: %s'%self.format.get_rowdata(r))
return self.mod.load(trows)
def __len__(self):
return self.numrows
def __repr__(self):
ob = self.mod.output_buffer()
self.ppob(ob)
return self.firstheader + ob.getvalue().rstrip()
def __sub__(self, other):
if not isinstance(other, Stat):
raise TypeError, 'Can only take difference with other Stat instance.'
if self.kindheader != other.kindheader:
raise ValueError, 'Mismatching table kind header, %r vs %r.'%(
self.kindheader, other.kindheader)
rows = []
otab = {}
stab = {}
for r in other.get_rows():
o = otab.get(r.name)
if o:
otab[r.name] = StatRow(r.count+o.count, r.size+o.size, r.name, o.index, None)
else:
otab[r.name] = r
for r in self.get_rows():
o = otab.get(r.name)
if o:
del otab[r.name]
count = r.count - o.count
size = r.size - o.size
else:
count = r.count
size = r.size
if count == 0 and size == 0:
continue
sr = stab.get(r.name)
if sr:
sr.count += count
sr.size += size
else:
sr = StatRow(count, size, r.name)
stab[sr.name] = sr
rows.append(sr)
rs = otab.values()
rs.sort(lambda x,y:cmp(x.index, y.index)) # Preserve orig. order
for r in rs:
sr = StatRow(-r.count, -r.size, r.name)
assert sr.name not in stab
rows.append(sr)
rows.sort(lambda x,y:cmp(abs(y.size), abs(x.size)))
cumulcount = 0
cumulsize = 0
for r in rows:
cumulcount += r.count
cumulsize += r.size
r.cumulsize = cumulsize
trows = [
'.loader: _load_stat',
'.format: DiffFormat',
'.timemade: %f'%self.mod.time.time(),
'.b_count: %d'%other.count,
'.b_size: %d'%other.size,
'.count: %d'%cumulcount,
'.size: %d'%cumulsize,
'.kindheader: %s'%self.kindheader,
'.kindname: %s'%self.kindname,
'.numrows: %d'%len(rows),
]
for r in rows:
trows.append('.r: %d %d %s'%(r.count, r.size, r.name))
return self.mod.load(trows)
def dump(self, fn, mode='a'):
if not hasattr(fn, 'write'):
f = open(fn, mode)
else:
f = fn
try:
for r in self.get_trows():
if not r[-1:] == '\n':
r += '\n'
f.write(r)
end = '.end: .loader: %s\n'%self.loader
if r != end:
f.write(end)
finally:
if f is not fn:
f.close()
def _get_more(self):
return self.mod.basic_more_printer(self, self)
more = property(_get_more)
def get_more_index(self, idx=None):
return self.format.get_more_index(idx)
def get_next(self):
try:
r = self.it.next()
except StopIteration:
r = None
else:
r = r.rstrip('\n')
self.last = r
return r
def get_row(self, idx):
while idx >= len(self.rows):
self.parse_next_row()
return self.rows[idx]
def get_rows(self, idx = None):
if idx is None:
idx = 0
while idx < self.numrows:
try:
row = self.get_row(idx)
except IndexError:
return
else:
yield row
idx += 1
def get_rows_of_kinds(self, kinds):
        # Return the rows whose names appear in the sequence `kinds` of unique
        # names, in that order; an entry is None where no row of that kind exists.
kindtab = {}
N = len(kinds)
res = [None] * len(kinds)
for i, kind in enumerate(kinds):
kindtab[kind] = i
assert len(kindtab) == N
n = 0
for row in self.get_rows():
idx = kindtab.get(row.name)
if idx is not None:
res[idx] = row
n += 1
if n >= N:
break
return res
def get_rows_n_and_other(self, N, sortby='Size'):
        # Get the N largest rows, largest first, and mix in an '<Other>' row
        # (summarizing the remainder) at its sorted position.
        # 'Largest' is by size if sortby == 'Size', by count if sortby == 'Count'.
        # Returns a NEW LIST (the caller may modify/sort it).
if sortby not in ('Size', 'Count'):
raise ValueError, "Argument 'sortby' must be 'Size' or 'Count'."
# Rows are already sorted by Size, largest first.
# If they want by Count, we need to resort them.
rows = self.get_rows()
if sortby == 'Count':
rows = list(rows)
rows.sort(lambda x, y: cmp(y.count, x.count))
retrows = []
cumulcount = 0
cumulsize = 0
for (i, r) in enumerate(rows):
if i >= N:
othercount = self.count - cumulcount
othersize = self.size - cumulsize
other = StatRow(othercount,
othersize,
'<Other>')
if sortby == 'Size':
for (i, r) in enumerate(retrows):
if r.size < othersize:
retrows[i:i] = [other]
break
else:
retrows.append(other)
elif sortby == 'Count':
for (i, r) in enumerate(retrows):
if r.count < othercount:
retrows[i:i] = [other]
break
else:
retrows.append(other)
else:
assert 0
break
cumulcount += r.count
cumulsize += r.size
retrows.append(r)
else:
assert cumulcount == self.count
assert cumulsize == self.size
return retrows
def parse_next_row(self):
r = self.last
if not r:
raise IndexError, 'Row index out of range.'
if r.startswith('.r: '):
r = r[4:]
sr = self.format.load_statrow(r)
self.cur_index += 1
self.rows.append(sr)
self.get_next()
return
elif r.startswith('.end'):
raise IndexError, 'Row index out of range.'
else:
raise SyntaxError
def ppob(self, ob, idx=None):
return self.format.ppob(ob, idx)
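# A minimal usage sketch (not part of the original module): diffing two Stat
# snapshots with the operations defined above. Assumes stat_a and stat_b are
# Stat instances sharing the same kindheader, e.g. two snapshots loaded via load().
def _example_stat_diff(stat_a, stat_b, filename):
    diff = stat_a - stat_b   # rows sorted by largest absolute size difference
    diff.dump(filename)      # append the textual rows to a file
    return diff[0]           # a new Stat holding only the largest-diff row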
class Partition:
def __init__(self, mod, set, er):
self.mod = mod
self.set = set
self.er = er
self._hiding_tag_ = mod._hiding_tag_
self.timemade = mod.time.time()
def __iter__(self):
# The default iteration is over the sets
# To iterate over rows (if more info is needed), get_rows() is available.
return self.get_sets()
def get_more_index(self, idx=None):
return self.format.get_more_index(idx)
def get_rows(self, rowindex = None):
# Iterator over rows
if rowindex is None:
rowindex = 0
while 1:
try:
row = self.get_row(rowindex)
except IndexError:
return
else:
yield row
rowindex += 1
def get_set(self, index):
if isinstance(index, slice):
start, stop, step = index.indices(self.numrows)
ns = self.get_nodeset(start, stop, step)
return self.mod.idset(ns, er=self.er)
else:
if index < 0:
index += self.numrows
return self.get_rowset(index)
def get_sets(self, index=None):
for idx in range(self.numrows):
yield self.get_rowset(idx)
def get_stat(self):
# Avoid any references into the set!
trows = list(self.get_trows())
def get_trows():
return trows
return self.mod._load_stat(get_trows)
def get_trows(self):
yield '.loader: _load_stat'
yield '.format: %s'%self.format.__class__.__name__
yield '.timemade: %f'%self.timemade
yield '.count: %d'%self.count
yield '.size: %d'%self.size
yield '.kindname: %s'%self.kindname
yield '.kindheader: %s'%self.kindheader
yield '.numrows: %d'%self.numrows
for row in self.get_rows():
yield '.r: %s'%self.format.get_rowdata(row)
def init_format(self, FormatClass):
self.format = FormatClass(self)
def ppob(self, ob, idx=None):
return self.format.ppob(ob, idx)
class IdentityPartitionCluster(object):
    # Contains objects of the same size.
    # Used to speed up management of the identity partition, since otherwise
    # we would have to sort all the objects, in the worst case on their
    # string representation.
__slots__ = 'objects','locount','hicount','losize','obsize','issorted'
def __init__(self, objects, locount, count, losize, obsize):
self.objects = objects # tuple of objects in this segment
self.locount = locount # count BEFORE objects in this cluster
self.hicount = locount+count # count AFTER these objects
self.losize = losize # size BEFORE objects in this cluster
self.obsize = obsize # size of EACH object in this segment
self.issorted = False # indicates if .objects is sorted
class IdentityPartition(Partition):
def __init__(self, mod, set, er):
Partition.__init__(self, mod, set, er)
clusters = []
sizeclasses = mod.Size.classifier.partition_cli(set.nodes)
sizeclasses.sort()
sizeclasses.reverse()
totcount = 0
totsize = 0
for size, v in sizeclasses:
count = len(v)
clusters.append(IdentityPartitionCluster(
self.mod.observation_list(v), totcount, count, totsize, size))
totsize += size * count
totcount += count
assert totcount == set.count
self.cluidx = 0
self.clusters = clusters
self.count = totcount
self.kind = kind = set.byclodo.kind
self.kindheader = kind.fam.c_get_idpart_header(kind)
self.kindname = kind.fam.c_get_idpart_label(kind)
self.numrows = totcount
self.render = kind.fam.c_get_idpart_render(kind)
self.size = totsize
self.sortrender = kind.fam.c_get_idpart_sortrender(kind)
self.init_format(IdFormat)
def get_nodeset(self, start, stop, step):
return self.get_nodeset_cluster(start, stop, step)[0]
def get_nodeset_cluster(self, start, stop, step):
if step <= 0:
raise ValueError, 'Step must be positive.'
ns = self.mod.mutnodeset()
if start >= stop:
return (ns, None)
clusters = self.clusters
lo = 0
hi = len(clusters)
cluidx = self.cluidx
while lo < hi:
clu = clusters[cluidx]
if clu.locount <= start:
if start < clu.hicount:
break
else:
lo = cluidx + 1
else:
hi = cluidx
cluidx = (lo + hi) // 2
else:
return (ns, None)
clu_to_return = clu
while 1:
objects = clu.objects
if start != clu.locount or stop < clu.hicount or step != 1:
if not clu.issorted:
sortrender = self.sortrender
if sortrender == 'IDENTITY':
ks = objects
else:
ks = [sortrender(x) for x in objects]
ks = [(kind, i) for i, kind in enumerate(ks)]
ks.sort()
clu.objects = objects = self.mod.observation_list(
[objects[i] for (kind, i) in ks])
clu.issorted = True
objects = objects[start-clu.locount:stop-clu.locount:step]
ns |= objects
self.cluidx = cluidx # memo till next call
start += len(objects)*step
if start >= stop:
break
for cluidx in range(cluidx + 1, len(clusters)):
clu = clusters[cluidx]
if clu.locount <= start < clu.hicount:
break
else:
break
return (ns, clu_to_return)
def get_row(self, rowidx):
ns, clu = self.get_nodeset_cluster(rowidx, rowidx+1, 1)
if not ns:
raise IndexError, 'Partition index out of range.'
vi = self.mod.idset(ns, er=self.er)
row = PartRow(1, clu.obsize, self.render(vi.theone),
rowidx, (rowidx+1-clu.locount)*clu.obsize + clu.losize,
vi, vi.kind)
return row
def get_rowset(self, rowidx):
ns = self.get_nodeset(rowidx, rowidx+1, 1)
if not ns:
raise IndexError, 'Partition index out of range.'
return self.mod.idset(ns, er=self.er)
class SetPartition(Partition):
def __init__(self, mod, set, er):
Partition.__init__(self, mod, set, er)
classifier = er.classifier
tosort = [(-part.size, classifier.get_tabrendering(kind, ''), kind, part)
for (kind, part) in classifier.partition(set.nodes)]
tosort.sort()
cumulsize = 0
rows = []
for (minusize, name, kind, part) in tosort:
size = -minusize
cumulsize += size
# assert size == part.size
rows.append(PartRow(
part.count, size, name,
len(rows), cumulsize,
part, kind))
# No check. Sizes may change. Note feb 8 2006.
#assert cumulsize == set.size
self.count = set.count
self.kindheader = classifier.get_tabheader('')
self.kindname = ''
self.numrows = len(rows)
self.rows = rows
self.size = cumulsize
self.init_format(SetFormat)
def get_nodeset(self, start, stop, step):
if step <= 0:
raise ValueError, 'Step must be positive.'
ns = self.mod.mutnodeset()
while start < stop:
ns |= self.rows[start].set.nodes
start += step
return ns
def get_row(self, idx):
try:
return self.rows[idx]
except IndexError:
raise IndexError, 'Partition index out of range.'
def get_rowset(self, idx):
return self.get_row(idx).set
class _GLUECLAMP_:
_preload_ = ('_hiding_tag_',)
_chgable_ = ('line_length', 'backup_suffix')
_imports_ = (
'_parent.OutputHandling:output_buffer',
'_parent.OutputHandling:basic_more_printer',
'_parent.ImpSet:mutnodeset',
'_parent.Use:Id',
'_parent.Use:Size',
'_parent.Use:idset',
'_parent.Use:load',
'_parent.View:_hiding_tag_',
'_parent.View:observation_list',
'_root.os:rename',
'_root.textwrap:fill',
        '_root.textwrap:wrap',
'_root:time',
)
# 'Config'
line_length = 100
backup_suffix = '.old'
# Factory method
def partition(self, set, er):
if er.classifier is self.Id.classifier:
return IdentityPartition(self, set, er)
else:
return SetPartition(self, set, er)
# Private - Use.load is intended to be used directly.
def _load_stat(self, get_trows):
return Stat(self, get_trows)
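# A minimal usage sketch (hypothetical helper, not part of the module): using the
# partition() factory above. Assumes part_module is the bound _GLUECLAMP_ instance
# and iso_set/er are an idset and an equivalence relation as used elsewhere in heapy.
def _example_partition(part_module, iso_set, er):
    p = part_module.partition(iso_set, er)
    return p.get_stat()   # a Stat snapshot detached from the underlying set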
| apache-2.0 | 1,496,926,129,144,674,300 | 9,201,176,475,154,110,000 | 24.046272 | 88 | 0.617931 | false |
yfried/ansible | test/units/plugins/action/test_raw.py | 45 | 3763 | # (c) 2016, Saran Ahluwalia <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleActionFail
from units.compat import unittest
from units.compat.mock import patch, MagicMock, Mock
from ansible.plugins.action.raw import ActionModule
from ansible.playbook.task import Task
from ansible.plugins.loader import connection_loader
play_context = Mock()
play_context.shell = 'sh'
connection = connection_loader.get('local', play_context, os.devnull)
class TestCopyResultExclude(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
    # The current behavior of the raw action with regard to executable is in question;
    # test_raw_executable_is_not_empty_string verifies the current behavior (whether it is desired or not).
# Please refer to the following for context:
# Issue: https://github.com/ansible/ansible/issues/16054
# PR: https://github.com/ansible/ansible/pull/16085
def test_raw_executable_is_not_empty_string(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.mock_am._admin_users = ['root', 'toor']
self.mock_am.run()
self.mock_am._low_level_execute_command.assert_called_with('Args1', executable=False)
def test_raw_check_mode_is_True(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
play_context.check_mode = True
try:
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
except AnsibleActionFail:
pass
def test_raw_test_environment_is_None(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
task.environment = None
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.assertEqual(task.environment, None)
def test_raw_task_vars_is_not_None(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
task.environment = None
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.mock_am.run(task_vars={'a': 'b'})
self.assertEqual(task.environment, None)
| gpl-3.0 | -1,678,641,817,867,828,200 | 1,198,114,771,542,897,400 | 33.842593 | 122 | 0.682434 | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractCurrentlyTLingBuniMi.py | 1 | 1148 | def extractCurrentlyTLingBuniMi(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('[BNM]'):
return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[DD]'):
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[HCLS]'):
return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Abyss Domination', 'Abyss Domination', 'translated'),
('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | 4,163,741,847,204,743,700 | -8,406,247,326,171,236,000 | 44.96 | 151 | 0.690767 | false |
OptiPop/external_chromium_org_third_party_skia | gm/rename_config.py | 20 | 3431 | #!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to rename a config in some subset of our GM expectation files.
Created for http://skbug.com/2752 ('split existing "gpu" GM results into "gl"
and "gles"')
Run with -h to see usage.
Example command lines:
rename_config.py gpu gles '.*Android.*'
TODO(epoger): Once https://codereview.chromium.org/397103003/ is committed,
we should add a unittest. Until then, we can test this as follows:
OLD=expectations/gm && NEW=/tmp/expectations && \
rm -rf $NEW && \
cp -a $OLD $NEW && \
gm/rename_config.py msaa4 gles-msaa4 '.*Android.*' \
--expectations-root $NEW && \
diff --recursive $OLD $NEW
"""
__author__ = 'Elliot Poger'
import argparse
import os
import re
import gm_json
DEFAULT_EXPECTATIONS_ROOT = os.path.join(
os.path.dirname(__file__), os.pardir, 'expectations', 'gm')
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
class Renamer(object):
def __init__(self, args):
"""
Params:
args: the Namespace object generated by argparse.parse_args()
"""
self._args = args
def run(self):
"""Perform all the subsitutions."""
for path in self._get_file_list():
self._rename_config(path=path,
old=self._args.old_config_name,
new=self._args.new_config_name)
def _rename_config(self, path, old, new):
"""Renames all instances of a config within a GM expectations file.
Params:
path: path to file which will be modified in place
old: old config name
new: new config name
"""
dic = gm_json.LoadFromFile(file_path=path)
expected_results = dic[gm_json.JSONKEY_EXPECTEDRESULTS]
orig_keys = expected_results.keys()
for key in orig_keys:
result = expected_results.pop(key)
(testname, config) = IMAGE_FILENAME_RE.match(key).groups()
if config == old:
config = new
key = '%s_%s.png' % (testname, config)
expected_results[key] = result
gm_json.WriteToFile(json_dict=dic, file_path=path)
def _get_file_list(self):
"""Returns the list of files we want to operate on (the complete path
to each file)."""
root = self._args.expectations_root
regex = re.compile(self._args.builder_name_pattern)
return [os.path.join(root, builder, 'expected-results.json')
for builder in os.listdir(root)
if regex.match(builder)]
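# A minimal usage sketch (hypothetical, not part of the original tool): driving
# Renamer from Python instead of the command line; the Namespace fields mirror
# the flags defined in main() below.
def _example_rename(expectations_root):
  args = argparse.Namespace(old_config_name='gpu',
                            new_config_name='gles',
                            builder_name_pattern='.*Android.*',
                            expectations_root=expectations_root)
  Renamer(args).run()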
def main():
parser = argparse.ArgumentParser()
parser.add_argument('old_config_name',
help=('Config name we want to replace.'))
parser.add_argument('new_config_name',
help=('Config name we want to replace the old one with.'))
parser.add_argument('builder_name_pattern',
help=('Regex pattern describing which builders we want '
'to make the substitution for; \'.*\' to perform '
'the replacement on all builders.'))
parser.add_argument('--expectations-root',
default=DEFAULT_EXPECTATIONS_ROOT,
help=('Root of the GM expectations dir; defaults to '
'%(default)s'))
args = parser.parse_args()
renamer = Renamer(args)
renamer.run()
if __name__ == '__main__':
main()
| bsd-3-clause | 4,218,569,164,502,764,500 | -1,919,084,750,112,808,700 | 31.990385 | 80 | 0.626348 | false |
ychfan/tensorflow | tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py | 10 | 10517 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Exponential distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import exponential
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
__all__ = ["VectorExponentialLinearOperator"]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorExponentialLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in S(loc, scale),
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `S = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`, is an image of
the positive half-space,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`,
* `Z` denotes the normalization constant.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
ds = tf.contrib.distributions
la = tf.linalg
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
mat = [[1.0, 0.1],
[0.1, 1.0]]
vex = ds.VectorExponentialLinearOperator(
scale=la.LinearOperatorFullMatrix(mat))
# Compute the pdf of an`R^2` observation; return a scalar.
vex.prob([1., 2.]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Exponential's.
mu = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = ds.VectorExponentialLinearOperator(
loc=mu,
scale=la.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x).eval() # shape: [2]
```
"""
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorExponentialLinearOperator"):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = locals()
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorExponentialLinearOperator, self).__init__(
distribution=exponential.Exponential(rate=array_ops.ones(
[], dtype=scale.dtype), allow_nan_stats=allow_nan_stats),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorExponentialLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorExponentialLinearOperator, self)._prob(x)
def _mean(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then E[X] = loc + L1, where 1 is the vector of ones.
scale_x_ones = self.bijector.scale.matvec(
array_ops.ones(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_ones
return array_ops.identity(self.loc) + scale_x_ones
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
# Cov(X) = L Cov(W W^T) L^T = L L^T.
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(
array_ops.matrix_diag_part(self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(
array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
scale_x_zeros = self.bijector.scale.matvec(
array_ops.zeros(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_zeros
return array_ops.identity(self.loc) + scale_x_zeros
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
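# A minimal usage sketch (assumption: evaluated inside a session, as with the
# examples in the class docstring): a 3-variate vector exponential with a
# diagonal scale operator.
def _example_vector_exponential():
  scale = linalg.LinearOperatorDiag([1., 2., 3.])
  vex = VectorExponentialLinearOperator(loc=[1., 0., 0.], scale=scale)
  return vex.mean(), vex.log_prob([2., 3., 4.])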
| apache-2.0 | -7,666,131,825,978,735,000 | 6,445,880,331,535,579,000 | 35.517361 | 80 | 0.657412 | false |
yfried/ansible | lib/ansible/modules/monitoring/icinga2_host.py | 35 | 9960 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This module is proudly sponsored by CGI (www.cgi.com) and
# KPN (www.kpn.com).
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: icinga2_host
short_description: Manage a host in Icinga2
description:
- "Add or remove a host to Icinga2 through the API."
- "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
version_added: "2.5"
author: "Jurgen Brand (@t794104)"
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
required: true
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without C(url_password) for sites that allow empty passwords.
url_password:
description:
- The password for use in HTTP basic authentication.
- If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
force_basic_auth:
description:
      - httplib2, the library used by the uri module, only sends authentication information when a webservice
        responds to an initial request with a 401 status. Since some basic auth services do not properly
        send a 401, logins will fail. This option forces the sending of the Basic authentication header
        upon the initial request.
type: bool
default: 'no'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
the key is included, C(client_key) is not required.
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL
client authentication. If C(client_cert) contains both the certificate
and key, this option is not required.
state:
description:
- Apply feature state.
choices: [ "present", "absent" ]
default: present
name:
description:
      - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
required: true
zone:
description:
- The zone from where this host should be polled.
template:
description:
- The template used to define the host.
- Template cannot be modified after object creation.
check_command:
description:
- The command used to check if the host is alive.
default: "hostalive"
display_name:
description:
- The name used to display the host.
    default: if none is given it is the value of the <name> parameter
ip:
description:
- The IP address of the host.
required: true
variables:
description:
- List of variables.
'''
EXAMPLES = '''
- name: Add host to icinga
icinga2_host:
url: "https://icinga2.example.com"
url_username: "ansible"
url_password: "a_secret"
state: present
name: "{{ ansible_fqdn }}"
ip: "{{ ansible_default_ipv4.address }}"
delegate_to: 127.0.0.1
'''
RETURN = '''
name:
description: The name used to create, modify or delete the host
type: string
returned: always
data:
description: The data structure used for create, modify or delete of the host
type: dict
returned: always
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec
# ===========================================
# Icinga2 API class
#
class icinga2_api:
module = None
def call_url(self, path, data='', method='GET'):
headers = {
'Accept': 'application/json',
'X-HTTP-Method-Override': method,
}
url = self.module.params.get("url") + "/" + path
rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method)
body = ''
if rsp:
body = json.loads(rsp.read())
if info['status'] >= 400:
body = info['body']
return {'code': info['status'], 'data': body}
def check_connection(self):
ret = self.call_url('v1/status')
if ret['code'] == 200:
return True
return False
def exists(self, hostname):
data = {
"filter": "match(\"" + hostname + "\", host.name)",
}
ret = self.call_url(
path="v1/objects/hosts",
data=self.module.jsonify(data)
)
if ret['code'] == 200:
if len(ret['data']['results']) == 1:
return True
return False
def create(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="PUT"
)
return ret
def delete(self, hostname):
data = {"cascade": 1}
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="DELETE"
)
return ret
def modify(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="POST"
)
return ret
def diff(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
method="GET"
)
changed = False
ic_data = ret['data']['results'][0]
for key in data['attrs']:
if key not in ic_data['attrs'].keys():
changed = True
elif data['attrs'][key] != ic_data['attrs'][key]:
changed = True
return changed
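# A minimal usage sketch (hypothetical helper, not part of the module): querying the
# API wrapper above outside of main(). Assumes `module` is an AnsibleModule whose
# params carry the url/credential options declared in main() below.
def _example_host_exists(module, hostname):
    api = icinga2_api()
    api.module = module
    return api.check_connection() and api.exists(hostname)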
# ===========================================
# Module execution.
#
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# remove unnecessary argument 'force'
del argument_spec['force']
# add our own arguments
argument_spec.update(
state=dict(default="present", choices=["absent", "present"]),
name=dict(required=True, aliases=['host']),
zone=dict(),
template=dict(default=None),
check_command=dict(default="hostalive"),
display_name=dict(default=None),
ip=dict(required=True),
variables=dict(type='dict', default=None),
)
# Define the main module
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params["state"]
name = module.params["name"]
zone = module.params["zone"]
template = []
template.append(name)
if module.params["template"]:
template.append(module.params["template"])
check_command = module.params["check_command"]
ip = module.params["ip"]
display_name = module.params["display_name"]
if not display_name:
display_name = name
variables = module.params["variables"]
try:
icinga = icinga2_api()
icinga.module = module
icinga.check_connection()
except Exception as e:
module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
data = {
'attrs': {
'address': ip,
'display_name': display_name,
'check_command': check_command,
'zone': zone,
'vars': {
'made_by': "ansible",
},
'templates': template,
}
}
if variables:
data['attrs']['vars'].update(variables)
changed = False
if icinga.exists(name):
if state == "absent":
if module.check_mode:
module.exit_json(changed=True, name=name, data=data)
else:
try:
ret = icinga.delete(name)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code deleting host: %s" % (ret['data']))
except Exception as e:
module.fail_json(msg="exception deleting host: " + str(e))
elif icinga.diff(name, data):
if module.check_mode:
module.exit_json(changed=False, name=name, data=data)
# Template attribute is not allowed in modification
del data['attrs']['templates']
ret = icinga.modify(name, data)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code modifying host: %s" % (ret['data']))
else:
if state == "present":
if module.check_mode:
changed = True
else:
try:
ret = icinga.create(name, data)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code creating host: %s" % (ret['data']))
except Exception as e:
module.fail_json(msg="exception creating host: " + str(e))
module.exit_json(changed=changed, name=name, data=data)
# import module snippets
if __name__ == '__main__':
main()
| gpl-3.0 | 6,154,807,410,532,894,000 | -5,052,903,093,823,018,000 | 30.028037 | 110 | 0.574498 | false |
xcgspring/AXUI | test/test_driver/windows/test_Translater.py | 1 | 1731 |
import sys
import unittest
class TestTranslater(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_coordinate_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Coordinate = '(12 ,34, 56, 79)'"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_index_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Name='menu bar' AND Index=3"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_UIA_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Name='menu bar' AND LocalizedControlType='menu bar'"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
| apache-2.0 | -525,210,914,453,632,900 | 8,406,456,785,715,013,000 | 53.09375 | 92 | 0.722126 | false |
christiansandberg/canopen | test/test_emcy.py | 1 | 2212 | import unittest
from canopen import emcy
class TestEmcyConsumer(unittest.TestCase):
def test_emcy_list(self):
emcy_node = emcy.EmcyConsumer()
emcy_node.on_emcy(0x81, b'\x01\x20\x02\x00\x01\x02\x03\x04', 1473418396.0)
emcy_node.on_emcy(0x81, b'\x10\x90\x01\x00\x01\x02\x03\x04', 1473418397.0)
self.assertEqual(len(emcy_node.log), 2)
self.assertEqual(len(emcy_node.active), 2)
error = emcy_node.log[0]
self.assertIsInstance(error, emcy.EmcyError)
self.assertIsInstance(error, Exception)
self.assertEqual(error.code, 0x2001)
self.assertEqual(error.register, 0x02)
self.assertEqual(error.data, b'\x00\x01\x02\x03\x04')
self.assertAlmostEqual(error.timestamp, 1473418396.0)
self.assertEqual(emcy_node.active[0], error)
error = emcy_node.log[1]
self.assertEqual(error.code, 0x9010)
self.assertEqual(error.register, 0x01)
self.assertEqual(error.data, b'\x00\x01\x02\x03\x04')
self.assertAlmostEqual(error.timestamp, 1473418397.0)
self.assertEqual(emcy_node.active[1], error)
emcy_node.on_emcy(0x81, b'\x00\x00\x00\x00\x00\x00\x00\x00', 1473418397.0)
self.assertEqual(len(emcy_node.log), 3)
self.assertEqual(len(emcy_node.active), 0)
def test_str(self):
error = emcy.EmcyError(0x2001, 0x02, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x2001, Current")
error = emcy.EmcyError(0x50FF, 0x01, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x50FF, Device Hardware")
error = emcy.EmcyError(0x7100, 0x01, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x7100")
class MockNetwork(object):
data = None
def send_message(self, can_id, data):
self.data = data
class TestEmcyProducer(unittest.TestCase):
def test_send(self):
network = MockNetwork()
emcy_node = emcy.EmcyProducer(0x80 + 1)
emcy_node.network = network
emcy_node.send(0x2001, 0x2, b'\x00\x01\x02\x03\x04')
self.assertEqual(network.data, b'\x01\x20\x02\x00\x01\x02\x03\x04')
| mit | -1,534,005,000,694,432,500 | -1,624,965,144,727,793,400 | 35.262295 | 83 | 0.65642 | false |
arnedesmedt/dotfiles | .config/sublime-text-3/Packages.symlinkfollow/pygments/all/pygments/lexers/tcl.py | 47 | 5398 | # -*- coding: utf-8 -*-
"""
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
from pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
"""
For Tcl source code.
.. versionadded:: 0.10
"""
keyword_cmds_re = words((
'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
'vwait', 'while'), prefix=r'\b', suffix=r'\b')
builtin_cmds_re = words((
'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl', '*.rvt']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w.-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'\}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w.:-]+)', Name.Variable),
(r'([\w.:-]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)\b', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'\}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'\}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
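# A minimal usage sketch (not part of the lexer): running TclLexer through the
# standard Pygments highlight() pipeline.
def _example_highlight(code):
    from pygments import highlight
    from pygments.formatters import NullFormatter
    return highlight(code, TclLexer(), NullFormatter())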
| mit | -6,037,861,646,282,135,000 | -6,399,596,195,655,417,000 | 36.227586 | 100 | 0.435346 | false |
medallia/aurora | src/main/python/apache/aurora/config/resource.py | 2 | 3436 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
from numbers import Number
from enum import Enum, unique
from gen.apache.aurora.api.ttypes import Resource
ResourceDetails = namedtuple('ResourceDetails', ['resource_type', 'value'])
@unique
class ResourceType(Enum):
"""Describes Aurora resource types and their traits."""
CPUS = ('numCpus', 'CPU', ' core(s)', float, 1)
RAM_MB = ('ramMb', 'RAM', ' MB', int, 2)
DISK_MB = ('diskMb', 'Disk', ' MB', int, 3)
PORTS = ('namedPort', 'Port', '', str, 4)
GPUS = ('numGpus', 'GPU', ' GPU(s)', int, 5)
def __init__(self, field, display_name, display_unit, value_type, display_position):
self._field = field
self._display_name = display_name
self._display_unit = display_unit
self._value_type = value_type
self._display_position = display_position
@property
def field(self):
return self._field
@property
def display_name(self):
return self._display_name
@property
def display_unit(self):
return self._display_unit
@property
def value_type(self):
return self._value_type
@property
def display_position(self):
return self._display_position
def resource_value(self, resource):
return resource.__dict__.get(self._field)
@classmethod
def from_resource(cls, resource):
for _, member in cls.__members__.items():
if resource.__dict__.get(member.field) is not None:
return member
else:
raise ValueError("Unknown resource: %s" % resource)
class ResourceManager(object):
"""Provides helper methods for working with Aurora resources."""
@classmethod
def resource_details(cls, resources):
result = []
if resources:
for resource in list(resources):
r_type = ResourceType.from_resource(resource)
result.append(ResourceDetails(r_type, r_type.resource_value(resource)))
return sorted(result, key=lambda rd: rd.resource_type.display_position)
return result
@classmethod
def resource_details_from_quota(cls, quota):
return cls.resource_details(quota.resources)
@classmethod
def resource_details_from_task(cls, task):
return cls.resource_details(cls._backfill_resources(task))
@classmethod
def quantity_of(cls, resource_details, resource_type):
result = 0.0
for d in resource_details:
if d.resource_type is resource_type:
result += d.value if isinstance(d.value, Number) else 1
return result
@classmethod
def _backfill_resources(cls, r_object):
resources = list(r_object.resources) if r_object.resources else None
if resources is None:
resources = [
Resource(numCpus=r_object.numCpus),
Resource(ramMb=r_object.ramMb),
Resource(diskMb=r_object.diskMb)
]
if hasattr(r_object, 'requestedPorts'):
resources += [Resource(namedPort=p) for p in r_object.requestedPorts or []]
return resources
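# A minimal usage sketch (hypothetical helper): total CPU quantity of a task.
# Assumes `task` is a thrift TaskConfig-like object exposing resources/numCpus/
# ramMb/diskMb, as expected by _backfill_resources above.
def _example_total_cpus(task):
  details = ResourceManager.resource_details_from_task(task)
  return ResourceManager.quantity_of(details, ResourceType.CPUS)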
| apache-2.0 | 8,347,975,210,638,750,000 | 5,178,286,382,628,477,000 | 29.40708 | 86 | 0.688591 | false |
Froff/TFY4115-Simulering | python/Simulation.py | 1 | 1185 | from math import sqrt
import Slope
class Simulation:
SIM_STEP_SIZE = 0.0001
const_g = -981
def __init__ (self, slope, **kwargs):
self.slope = slope
self.t = [0]
self.x = [Simulation.SIM_STEP_SIZE]
self.mom_inertia_coefficient = 0
for name, value in kwargs.items():
if name == "startingposition":
self.x = [value]
if name == "momentofintertiacoefficient":
self.mom_inertia_coefficient = value
def runSimulation(self):
while not self.isFinished():
self.step()
def step (self):
x = self.x[-1]
dydx = self.slope.dydx(x)
y = self.slope.f(x) - self.slope.f(0)
I = self.mom_inertia_coefficient
g = Simulation.const_g
step_size = Simulation.SIM_STEP_SIZE
try:
self.x.append(x + step_size * sqrt( (2*g*y) / ( (1 + I) * (1 + dydx**2) ) ))
self.t.append(self.t[-1] + Simulation.SIM_STEP_SIZE)
except ValueError:
print("Math domain error. x={}, y={}".format(x, y))
exit(2)
def isFinished (self):
return self.x[-1] >= self.slope.end
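# A minimal usage sketch (assumption: `slope` is a Slope.Slope instance exposing
# f(x), dydx(x) and `end` as used above; its constructor is not shown here).
# Rolls a body with I/(m*r^2) = 0.5 down the slope and reports the final time.
def _example_run(slope):
    sim = Simulation(slope, momentofintertiacoefficient=0.5)
    sim.runSimulation()
    return sim.t[-1], sim.x[-1]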
| mit | 8,743,386,583,956,119,000 | 6,702,576,848,980,115,000 | 30.184211 | 88 | 0.533333 | false |
Fl0rianFischer/sme_odoo | addons/l10n_pl/__openerp__.py | 19 | 1191 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2009 - now Grzegorz Grzelak [email protected]
{
'name' : 'Poland - Accounting',
'version' : '1.02',
'author' : 'Grzegorz Grzelak (OpenGLOBE)',
'website': 'http://www.openglobe.pl',
'category' : 'Localization/Account Charts',
'description': """
This is the module to manage the accounting chart and taxes for Poland in OpenERP.
==================================================================================
This module creates the model chart of accounts, taxes, fiscal positions and
tax registers. It also sets up the accounts for purchase and sale of goods,
assuming that all goods are traded wholesale.
This module is intended for odoo 8.0.
Internal OpenGLOBE version number 1.02
""",
'depends' : ['account', 'base_iban', 'base_vat'],
'demo' : [],
'data' : [
'account_chart.xml',
'account_tax.xml',
'fiscal_position.xml',
'country_pl.xml',
'account_chart_template.yml'
],
'installable': True,
}
| gpl-3.0 | -8,226,135,077,280,796,000 | 1,767,170,287,860,483,300 | 34.666667 | 82 | 0.60068 | false |
kampanita/pelisalacarta | python/main-classic/channels/pelisdanko.py | 3 | 14488 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for PelisDanko
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
import sys
from core import config
from core import logger
from core import scrapertools
from core.item import Item
__modo_grafico__ = config.get_setting('modo_grafico', 'pelisdanko')
DEBUG = config.get_setting("debug")
host = "http://pelisdanko.com"
art = "http://pelisdanko.com/img/background.jpg"
def mainlist(item):
logger.info("pelisalacarta.channels.pelisdanko mainlist")
itemlist = []
itemlist.append(item.clone(action="novedades", title="Novedades", url=host + "/novedades",
fanart=art))
itemlist.append(item.clone(action="novedades", title="Estrenos", url=host + "/estrenos",
fanart=art))
itemlist.append(item.clone(action="novedades", title="Populares", url=host + "/populares",
fanart=art))
itemlist.append(item.clone(action="actualizadas", title="Películas actualizadas", url=host,
fanart=art))
itemlist.append(item.clone(action="indices", title="Índices", fanart=art))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", fanart=art))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", fanart=art,
text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
platformtools.show_channel_settings()
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")
def search(item, texto):
logger.info("pelisalacarta.channels.pelisdanko search")
texto = texto.replace(" ", "+")
item.url = "http://pelisdanko.com/busqueda?terms=%s" % texto
try:
return novedades(item)
    # Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info("pelisalacarta.channels.pelisdanko newest")
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = "http://pelisdanko.com/novedades"
itemlist = novedades(item)
if itemlist[-1].action == "novedades":
itemlist.pop()
    # Catch the exception so the "newest" listing is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def novedades(item):
logger.info("pelisalacarta.channels.pelisdanko novedades")
itemlist = []
    # Download the page
data = scrapertools.downloadpage(item.url)
bloque = scrapertools.find_multiple_matches(data, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d] col-lg-[\d]'
' text-center"(.*?)</div>')
for match in bloque:
calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>'
'([^<]+)</span>')
calidad = "[COLOR darkseagreen] "
for quality in calidades:
calidad += "[" + quality + "]"
patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + "[/COLOR]"
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle),
url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
fulltitle=contentTitle, filtro=False, contentTitle=contentTitle,
context="05", trailer=True))
    # Look for links to the following pages...
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" rel="next">')
if len(next_page_url) > 0:
itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url))
return itemlist
def actualizadas(item):
logger.info("pelisalacarta.channels.pelisdanko actualizadas")
itemlist = []
    # Download the page
data = scrapertools.downloadpage(item.url)
bloque_big = scrapertools.find_single_match(data, 'Últimas actualizaciones(.*?)<div class="col-xs-10 col-md-8 '
'text-left">')
bloque = scrapertools.find_multiple_matches(bloque_big, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d]'
' col-lg-[\d] text-center"(.*?)<br><br>')
for match in bloque:
calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>'
'([^<]+)</span>')
calidad = "[COLOR darkseagreen] "
for quality in calidades:
calidad += "[" + quality + "]"
languages = scrapertools.find_multiple_matches(match, '<img width="28".*?alt="([^"]+)"')
idiomas = " ("
for idioma in languages:
idioma = idioma.replace('ES_', '').replace('ES', 'CAST')
if idioma != "CAST" and idioma != "LAT":
idioma = "VOSE"
idiomas += idioma + "/"
patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + idiomas[
:-1] + ")[/COLOR]"
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle),
url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
fulltitle=contentTitle, filtro=False, contentTitle=contentTitle,
context="05"))
return itemlist
def indices(item):
logger.info("pelisalacarta.channels.pelisdanko indices")
itemlist = []
item.text_color = "orchid"
itemlist.append(item.clone(action="indice_list", title="Género", url=host, fulltitle="genero"))
itemlist.append(item.clone(action="indice_list", title="Alfabético", url=host, fulltitle="letra"))
itemlist.append(item.clone(action="indice_list", title="Idioma", url=host, fulltitle="idioma"))
itemlist.append(item.clone(action="indice_list", title="Calidad", url=host, fulltitle="calidad"))
itemlist.append(item.clone(action="indice_list", title="Nacionalidad", url=host, fulltitle="nacionalidad"))
return itemlist
def indice_list(item):
logger.info("pelisalacarta.channels.pelisdanko indice_list")
itemlist = []
    # Download the page
data = scrapertools.downloadpage(item.url)
patron = '<a href="(http://pelisdanko.com/%s/[^"]+)">([^<]+)</a>' % item.fulltitle
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.capitalize()
itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl))
return itemlist
def enlaces(item):
logger.info("pelisalacarta.channels.pelisdanko enlaces")
item.extra = ""
item.text_color = ""
itemlist = []
    # Download the page
data = scrapertools.downloadpage(item.url)
data = re.sub(r"\n|\r|\t|\s{2}", '', data)
item.fanart = scrapertools.find_single_match(data, "CUSTOM BACKGROUND.*?url\('([^']+)'")
item.infoLabels["plot"] = scrapertools.find_single_match(data, 'dt>Sinopsis</dt> <dd class=[^>]+>(.*?)</dd>')
year = scrapertools.find_single_match(data, '<dt>Estreno</dt> <dd>(\d+)</dd>')
try:
from core import tmdb
item.infoLabels['year'] = int(year)
        # Fetch the basic data for all the movies using multiple threads
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
filtro_idioma = config.get_setting("filterlanguages", item.channel)
filtro_enlaces = config.get_setting("filterlinks", item.channel)
dict_idiomas = {'CAST': 2, 'LAT': 1, 'VOSE': 0}
if filtro_enlaces != 0:
itemlist.append(item.clone(action="", title="Enlaces Online", text_color="dodgerblue", text_bold=True))
itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "ss", item)
if filtro_enlaces != 1:
itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color="dodgerblue", text_bold=True))
itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "dd", item)
trailer_id = scrapertools.find_single_match(data, 'data:\s*\{\s*id:\s*"([^"]+)"')
data_trailer = scrapertools.downloadpage("http://pelisdanko.com/trailer", post="id=%s" % trailer_id)
url_trailer = scrapertools.find_single_match(data_trailer, 'src="([^"]+)"')
if url_trailer != "":
url_trailer = url_trailer.replace("embed/", "watch?v=")
item.infoLabels['trailer'] = url_trailer
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
return itemlist
def bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, type, item):
logger.info("pelisalacarta.channels.pelisdanko bloque_enlaces")
bloque = scrapertools.find_single_match(data, '<div role="tabpanel" class="tab-pane fade" id="tab-' +
type + '">(.*?)</table>')
patron = '<tr class="rip hover".*?data-slug="([^"]+)".*?src="http://pelisdanko.com/img/flags/(.*?).png"' \
'.*?<span class="label label-default quality[^>]+>([^<]+)</span>.*?<td class="small">([^<]+)</td>'
matches = scrapertools.find_multiple_matches(bloque, patron)
filtrados = []
for slug, flag, quality, date in matches:
if flag != "ES" and flag != "ES_LAT":
flag = "VOSE"
flag = flag.replace('ES_LAT', 'LAT').replace('ES', 'CAST')
scrapedurl = "%s/%s/%s?#%s" % (item.url, slug, type, type)
scrapedtitle = " [COLOR firebrick]Mostrar enlaces: [/COLOR][COLOR goldenrod][" \
+ flag + "/" + quality + "][/COLOR][COLOR khaki] " + date + "[/COLOR]"
if filtro_idioma == 3 or item.filtro:
itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle), action="findvideos",
url=scrapedurl, id_enlaces=slug, calidad=quality))
else:
idioma = dict_idiomas[flag]
if idioma == filtro_idioma:
itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle),
action="findvideos", url=scrapedurl, id_enlaces=slug))
else:
if flag not in filtrados:
filtrados.append(flag)
if filtro_idioma != 3:
if len(filtrados) > 0:
title = bbcode_kodi2html("[COLOR orangered] Mostrar enlaces filtrados en %s[/COLOR]") % ", ".join(
filtrados)
itemlist.append(item.clone(title=title, action="enlaces", url=item.url, filtro=True))
return itemlist
def findvideos(item):
logger.info("pelisalacarta.channels.pelisdanko findvideos")
itemlist = []
if item.url[-2:] == "ss":
prefix = "strms"
else:
prefix = "lnks"
    # Download the page
data = scrapertools.downloadpage(item.url)
    # Parameters for the redirect where the links are shown
data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"')
data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"')
url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug)
data = scrapertools.downloadpage(url, post="")
from core import servertools
video_item_list = servertools.find_video_items(data=data)
for video_item in video_item_list:
title = "[COLOR green]%s[/COLOR] | [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad)
itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url, action="play",
server=video_item.server, text_color=""))
    # "Add this movie to the XBMC library" option
if config.get_library_support() and len(itemlist) > 0 and item.category != "Cine":
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library",
fulltitle=item.fulltitle, text_color="green", id_enlaces=item.id_enlaces))
return itemlist
def bbcode_kodi2html(text):
if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
import re
text = re.sub(r'\[COLOR\s([^\]]+)\]',
r'<span style="color: \1">',
text)
text = text.replace('[/COLOR]', '</span>') \
.replace('[CR]', '<br>') \
.replace('[B]', '<strong>') \
.replace('[/B]', '</strong>') \
.replace('"color: white"', '"color: auto"')
return text
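# Illustrative only (not part of the original channel code): on Plex/mediaserver
# platforms the helper above rewrites Kodi BBCode markup into HTML, e.g.
#
#     bbcode_kodi2html('[COLOR red][B]Hi[/B][/COLOR]')
#     # -> '<span style="color: red"><strong>Hi</strong></span>'
#
# On Kodi itself the text is returned unchanged.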
| gpl-3.0 | 6,604,189,237,083,174,000 | -6,080,610,329,236,547,000 | 44.373041 | 117 | 0.582838 | false |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1A/instances/10_0_workflow_full_10files_primary_3sh_3rs_noannot_with_proj_3s_range/generalinfo_0/GeneralInfo_0.py | 50 | 1207 | #!/usr/bin/env python
"""
This activity wants to answer:
- which time interval was analysed?
- how many items does this interval contain?
"""
# Connection with SciWonc-Dataflow module
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_GeneralInfo_0
# connector and config
client = DataStoreClient("mongodb", ConfigDB_GeneralInfo_0)
# according to config
data = client.getData() # return an array of docs (like a csv reader)
output = []
count = 0
min_time = None
max_time = None
if(data):
# processing
while True:
doc = data.next()
if doc is None:
break;
current_time = float(doc['time'])
if current_time:
if min_time is None or min_time > current_time:
min_time = current_time
if max_time is None or max_time < current_time:
max_time = current_time
count += 1
if count > 0:
newline = {}
newline['interval seconds'] = (max_time - min_time)/1000000
newline['total items'] = count
newline['min timestamp'] = min_time
newline['max timestamp'] = max_time
output.append(newline)
client.saveData(output)
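# Illustrative note (not in the original script): the single record saved above has the shape
# {'interval seconds': ..., 'total items': ..., 'min timestamp': ..., 'max timestamp': ...},
# where the interval is reported in seconds, presumably because the raw timestamps are in microseconds.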
| gpl-3.0 | -7,617,057,859,022,025,000 | 3,316,475,980,609,668,000 | 22.666667 | 69 | 0.620547 | false |
mdanielwork/intellij-community | python/helpers/pydev/tests_pydevd_runfiles/test_pydevdio.py | 26 | 1184 | import sys
import os
import unittest
class Test(unittest.TestCase):
def test_it(self):
        #make it as if we were executing from the directory above this one (so that we can import
        #_pydevd_bundle without the need for it being in the pythonpath)
#(twice the dirname to get the previous level from this file.)
import test_pydevdio #@UnresolvedImport - importing itself
ADD_TO_PYTHONPATH = os.path.join(os.path.dirname(os.path.dirname(test_pydevdio.__file__)))
sys.path.insert(0, ADD_TO_PYTHONPATH)
try:
from _pydevd_bundle import pydevd_io
original = sys.stdout
try:
sys.stdout = pydevd_io.IOBuf()
print('foo')
print('bar')
self.assertEqual('foo\nbar\n', sys.stdout.getvalue()) #@UndefinedVariable
print('ww')
print('xx')
self.assertEqual('ww\nxx\n', sys.stdout.getvalue()) #@UndefinedVariable
finally:
sys.stdout = original
finally:
#remove it to leave it ok for other tests
sys.path.remove(ADD_TO_PYTHONPATH)
| apache-2.0 | 3,419,299,378,307,228,700 | 4,306,529,945,107,249,700 | 31.888889 | 113 | 0.586149 | false |
tchernomax/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_boot_manager.py | 11 | 12392 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_boot_manager
short_description: Manage boot options for the given virtual machine
description:
- This module can be used to manage boot options for the given virtual machine.
version_added: 2.7
author:
- Abhijeet Kasurde (@Akasurde) <[email protected]>
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with.
- This is required if C(uuid) parameter is not supplied.
uuid:
description:
- UUID of the instance to manage if known, this is VMware's BIOS UUID.
- This is required if C(name) parameter is not supplied.
boot_order:
description:
- List of the boot devices.
default: []
name_match:
description:
     - If multiple virtual machines match the name, use the first or last one found.
default: 'first'
choices: ['first', 'last']
boot_delay:
description:
- Delay in milliseconds before starting the boot sequence.
default: 0
enter_bios_setup:
description:
     - If set to C(True), the virtual machine automatically enters BIOS setup the next time it boots.
     - The virtual machine resets this flag, so that subsequent boots proceed normally.
type: 'bool'
default: False
boot_retry_enabled:
description:
     - If set to C(True), a virtual machine that fails to boot will try to boot again after C(boot_retry_delay) has expired.
     - If set to C(False), the virtual machine waits indefinitely for user intervention.
type: 'bool'
default: False
boot_retry_delay:
description:
- Specify the time in milliseconds between virtual machine boot failure and subsequent attempt to boot again.
- If set, will automatically set C(boot_retry_enabled) to C(True) as this parameter is required.
default: 0
boot_firmware:
description:
- Choose which firmware should be used to boot the virtual machine.
choices: ["bios", "efi"]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Change virtual machine's boot order and related parameters
vmware_guest_boot_manager:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: testvm
boot_delay: 2000
enter_bios_setup: True
boot_retry_enabled: True
boot_retry_delay: 22300
boot_firmware: bios
boot_order:
- floppy
- cdrom
- ethernet
- disk
delegate_to: localhost
register: vm_boot_order
'''
RETURN = r"""
vm_boot_status:
description: metadata about boot order of virtual machine
returned: always
type: dict
sample: {
"current_boot_order": [
"floppy",
"disk",
"ethernet",
"cdrom"
],
"current_boot_delay": 2000,
"current_boot_retry_delay": 22300,
"current_boot_retry_enabled": true,
"current_enter_bios_setup": true,
"current_boot_firmware": "bios",
"previous_boot_delay": 10,
"previous_boot_retry_delay": 10000,
"previous_boot_retry_enabled": true,
"previous_enter_bios_setup": false,
"previous_boot_firmware": "bios",
"previous_boot_order": [
"ethernet",
"cdrom",
"floppy",
"disk"
],
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id, wait_for_task, TaskError
try:
from pyVmomi import vim
except ImportError:
pass
class VmBootManager(PyVmomi):
def __init__(self, module):
super(VmBootManager, self).__init__(module)
self.name = self.params['name']
self.uuid = self.params['uuid']
self.vm = None
def _get_vm(self):
vms = []
if self.uuid:
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
if vm_obj is None:
self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
vms = [vm_obj]
elif self.name:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
for temp_vm_object in objects:
if temp_vm_object.obj.name == self.name:
vms.append(temp_vm_object.obj)
if vms:
if self.params.get('name_match') == 'first':
self.vm = vms[0]
elif self.params.get('name_match') == 'last':
self.vm = vms[-1]
else:
self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid))
@staticmethod
def humanize_boot_order(boot_order):
results = []
for device in boot_order:
if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
results.append('cdrom')
elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
results.append('disk')
elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
results.append('ethernet')
elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
results.append('floppy')
return results
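    # Illustrative note (not in the original module): for a bootOrder holding a
    # BootableEthernetDevice followed by a BootableDiskDevice, the helper above
    # returns ['ethernet', 'disk'].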
def ensure(self):
self._get_vm()
valid_device_strings = ['cdrom', 'disk', 'ethernet', 'floppy']
boot_order_list = []
for device_order in self.params.get('boot_order'):
if device_order not in valid_device_strings:
self.module.fail_json(msg="Invalid device found [%s], please specify device from ['%s']" % (device_order,
"', '".join(valid_device_strings)))
if device_order == 'cdrom':
first_cdrom = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualCdrom)]
if first_cdrom:
boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice())
elif device_order == 'disk':
first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
if first_hdd:
boot_order_list.append(vim.vm.BootOptions.BootableDiskDevice(deviceKey=first_hdd[0].key))
elif device_order == 'ethernet':
first_ether = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)]
if first_ether:
boot_order_list.append(vim.vm.BootOptions.BootableEthernetDevice(deviceKey=first_ether[0].key))
elif device_order == 'floppy':
first_floppy = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualFloppy)]
if first_floppy:
boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice())
change_needed = False
kwargs = dict()
if len(boot_order_list) != len(self.vm.config.bootOptions.bootOrder):
kwargs.update({'bootOrder': boot_order_list})
change_needed = True
else:
for i in range(0, len(boot_order_list)):
boot_device_type = type(boot_order_list[i])
vm_boot_device_type = type(self.vm.config.bootOptions.bootOrder[i])
if boot_device_type != vm_boot_device_type:
kwargs.update({'bootOrder': boot_order_list})
change_needed = True
if self.vm.config.bootOptions.bootDelay != self.params.get('boot_delay'):
kwargs.update({'bootDelay': self.params.get('boot_delay')})
change_needed = True
if self.vm.config.bootOptions.enterBIOSSetup != self.params.get('enter_bios_setup'):
kwargs.update({'enterBIOSSetup': self.params.get('enter_bios_setup')})
change_needed = True
if self.vm.config.bootOptions.bootRetryEnabled != self.params.get('boot_retry_enabled'):
kwargs.update({'bootRetryEnabled': self.params.get('boot_retry_enabled')})
change_needed = True
if self.vm.config.bootOptions.bootRetryDelay != self.params.get('boot_retry_delay'):
if not self.vm.config.bootOptions.bootRetryEnabled:
kwargs.update({'bootRetryEnabled': True})
kwargs.update({'bootRetryDelay': self.params.get('boot_retry_delay')})
change_needed = True
boot_firmware_required = False
if self.vm.config.firmware != self.params.get('boot_firmware'):
change_needed = True
boot_firmware_required = True
changed = False
results = dict(
previous_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
previous_boot_delay=self.vm.config.bootOptions.bootDelay,
previous_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
previous_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
previous_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
previous_boot_firmware=self.vm.config.firmware,
current_boot_order=[],
)
if change_needed:
vm_conf = vim.vm.ConfigSpec()
vm_conf.bootOptions = vim.vm.BootOptions(**kwargs)
if boot_firmware_required:
vm_conf.firmware = self.params.get('boot_firmware')
task = self.vm.ReconfigVM_Task(vm_conf)
try:
changed, result = wait_for_task(task)
except TaskError as e:
self.module.fail_json(msg="Failed to perform reconfigure virtual"
" machine %s for boot order due to: %s" % (self.name or self.uuid,
to_native(e)))
results.update(
{
'current_boot_order': self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
'current_boot_delay': self.vm.config.bootOptions.bootDelay,
'current_enter_bios_setup': self.vm.config.bootOptions.enterBIOSSetup,
'current_boot_retry_enabled': self.vm.config.bootOptions.bootRetryEnabled,
'current_boot_retry_delay': self.vm.config.bootOptions.bootRetryDelay,
'current_boot_firmware': self.vm.config.firmware,
}
)
self.module.exit_json(changed=changed, vm_boot_status=results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
boot_order=dict(
type='list',
default=[],
),
name_match=dict(
choices=['first', 'last'],
default='first'
),
boot_delay=dict(
type='int',
default=0,
),
enter_bios_setup=dict(
type='bool',
default=False,
),
boot_retry_enabled=dict(
type='bool',
default=False,
),
boot_retry_delay=dict(
type='int',
default=0,
),
boot_firmware=dict(
type='str',
choices=['efi', 'bios'],
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid']
],
mutually_exclusive=[
['name', 'uuid']
],
)
pyv = VmBootManager(module)
pyv.ensure()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,263,219,805,581,176,600 | -3,904,274,523,788,355,600 | 35.771513 | 143 | 0.590058 | false |
googleapis/googleapis-gen | google/cloud/gkehub/v1alpha2/gkehub-v1alpha2-py/google/cloud/gkehub_v1alpha2/services/gke_hub/pagers.py | 1 | 5811 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.gkehub_v1alpha2.types import membership
class ListMembershipsPager:
"""A pager for iterating through ``list_memberships`` requests.
This class thinly wraps an initial
:class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and
provides an ``__iter__`` method to iterate through its
``resources`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListMemberships`` requests and continue to iterate
through the ``resources`` field on the
corresponding responses.
All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., membership.ListMembershipsResponse],
request: membership.ListMembershipsRequest,
response: membership.ListMembershipsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest):
The initial request object.
response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = membership.ListMembershipsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[membership.ListMembershipsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[membership.Membership]:
for page in self.pages:
yield from page.resources
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
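# Illustrative only (not part of the generated sources): a minimal sketch of how
# this pager is normally consumed. ``GkeHubClient`` and ``parent`` are assumed to
# be the synchronous client from this package and a fully qualified parent
# resource name.
#
#     client = GkeHubClient()
#     for membership in client.list_memberships(parent=parent):
#         # Iteration transparently issues follow-up ListMemberships requests
#         # as the ``resources`` field of each page is exhausted.
#         print(membership.name)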
class ListMembershipsAsyncPager:
"""A pager for iterating through ``list_memberships`` requests.
This class thinly wraps an initial
:class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``resources`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListMemberships`` requests and continue to iterate
through the ``resources`` field on the
corresponding responses.
All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[membership.ListMembershipsResponse]],
request: membership.ListMembershipsRequest,
response: membership.ListMembershipsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest):
The initial request object.
response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = membership.ListMembershipsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[membership.ListMembershipsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[membership.Membership]:
async def async_generator():
async for page in self.pages:
for response in page.resources:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| apache-2.0 | -5,719,678,593,279,796,000 | -380,823,999,768,263,800 | 40.507143 | 95 | 0.660816 | false |
wuhengzhi/chromium-crosswalk | tools/json_schema_compiler/js_externs_generator_test.py | 15 | 8773 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import json_parse
from js_externs_generator import JsExternsGenerator
from datetime import datetime
import model
import sys
import unittest
# The contents of a fake idl file.
fake_idl = """
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A totally fake API.
namespace fakeApi {
enum Greek {
ALPHA,
BETA,
GAMMA,
DELTA
};
dictionary Bar {
long num;
};
dictionary Baz {
DOMString str;
long num;
boolean b;
Greek letter;
Greek? optionalLetter;
long[] arr;
Bar[]? optionalObjArr;
Greek[] enumArr;
any[] anythingGoes;
Bar obj;
long? maybe;
(DOMString or Greek or long[]) choice;
object plainObj;
ArrayBuffer arrayBuff;
};
callback VoidCallback = void();
callback BazGreekCallback = void(Baz baz, Greek greek);
interface Functions {
// Does something exciting! And what's more, this is a multiline function
// comment! It goes onto multiple lines!
// |baz| : The baz to use.
static void doSomething(Baz baz, VoidCallback callback);
// |callback| : The callback which will most assuredly in all cases be
// called; that is, of course, iff such a callback was provided and is
// not at all null.
static void bazGreek(optional BazGreekCallback callback);
[deprecated="Use a new method."] static DOMString returnString();
};
interface Events {
// Fired when we realize it's a trap!
static void onTrapDetected(Baz baz);
};
};
"""
# The output we expect from our fake idl file.
expected_output = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.fakeApi.FooType'.
// Please run the closure compiler before committing changes.
// See https://chromium.googlesource.com/chromium/src/+/master/docs/closure_compilation.md
/** @fileoverview Externs generated from namespace: fakeApi */
/**
* @const
*/
chrome.fakeApi = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeApi#type-Greek
*/
chrome.fakeApi.Greek = {
ALPHA: 'ALPHA',
BETA: 'BETA',
GAMMA: 'GAMMA',
DELTA: 'DELTA',
};
/**
* @typedef {{
* num: number
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Bar
*/
chrome.fakeApi.Bar;
/**
* @typedef {{
* str: string,
* num: number,
* b: boolean,
* letter: !chrome.fakeApi.Greek,
* optionalLetter: (!chrome.fakeApi.Greek|undefined),
* arr: !Array<number>,
* optionalObjArr: (!Array<!chrome.fakeApi.Bar>|undefined),
* enumArr: !Array<!chrome.fakeApi.Greek>,
* anythingGoes: !Array<*>,
* obj: !chrome.fakeApi.Bar,
* maybe: (number|undefined),
* choice: (string|!chrome.fakeApi.Greek|!Array<number>),
* plainObj: Object,
* arrayBuff: ArrayBuffer
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Baz
*/
chrome.fakeApi.Baz;
/**
* Does something exciting! And what's more, this is a multiline function
* comment! It goes onto multiple lines!
* @param {!chrome.fakeApi.Baz} baz The baz to use.
* @param {function():void} callback
* @see https://developer.chrome.com/extensions/fakeApi#method-doSomething
*/
chrome.fakeApi.doSomething = function(baz, callback) {};
/**
* @param {function(!chrome.fakeApi.Baz, !chrome.fakeApi.Greek):void=} callback
* The callback which will most assuredly in all cases be called; that is,
* of course, iff such a callback was provided and is not at all null.
* @see https://developer.chrome.com/extensions/fakeApi#method-bazGreek
*/
chrome.fakeApi.bazGreek = function(callback) {};
/**
* @return {string}
* @deprecated Use a new method.
* @see https://developer.chrome.com/extensions/fakeApi#method-returnString
*/
chrome.fakeApi.returnString = function() {};
/**
* Fired when we realize it's a trap!
* @type {!ChromeEvent}
* @see https://developer.chrome.com/extensions/fakeApi#event-onTrapDetected
*/
chrome.fakeApi.onTrapDetected;""" % (datetime.now().year, sys.argv[0]))
fake_json = """// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
[
{
"namespace": "fakeJson",
"description": "Fake JSON API Stuff",
"types": [ {
"id": "CrazyEnum",
"type": "string",
"enum": ["camelCaseEnum", "Non-Characters", "5NumFirst", \
"3Just-plainOld_MEAN"]
} ],
"functions": [ {
"name": "funcWithInlineObj",
"type": "function",
"parameters": [
{
"type": "object",
"name": "inlineObj",
"description": "Evil inline object! With a super duper duper long\
string description that causes problems!",
"properties": {
"foo": {
"type": "boolean",
"optional": "true",
"description": "The foo."
},
"bar": {
"type": "integer",
"description": "The bar."
},
"baz": {
"type": "object",
"description": "Inception object.",
"properties": {
"depth": {
"type": "integer"
}
}
},
"quu": {
"type": "binary",
"description": "The array buffer"
}
}
},
{
"name": "callback",
"type": "function",
"parameters": [
{
"type": "object",
"name": "returnObj",
"properties": {
"str": { "type": "string"}
}
}
],
"description": "The callback to this heinous method"
}
],
"returns": {
"type": "object",
"properties": {
"str": { "type": "string" },
"int": { "type": "number" }
}
}
} ]
}
]"""
json_expected = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.fakeJson.FooType'.
// Please run the closure compiler before committing changes.
// See https://chromium.googlesource.com/chromium/src/+/master/docs/closure_compilation.md
/** @fileoverview Externs generated from namespace: fakeJson */
/**
* @const
*/
chrome.fakeJson = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeJson#type-CrazyEnum
*/
chrome.fakeJson.CrazyEnum = {
CAMEL_CASE_ENUM: 'camelCaseEnum',
NON_CHARACTERS: 'Non-Characters',
_5NUM_FIRST: '5NumFirst',
_3JUST_PLAIN_OLD_MEAN: '3Just-plainOld_MEAN',
};
/**
* @param {{
* foo: (boolean|undefined),
* bar: number,
* baz: {
* depth: number
* },
* quu: ArrayBuffer
* }} inlineObj Evil inline object! With a super duper duper long string
* description that causes problems!
* @param {function({
* str: string
* }):void} callback The callback to this heinous method
* @return {{
* str: string,
* int: number
* }}
* @see https://developer.chrome.com/extensions/fakeJson#method-funcWithInlineObj
*/
chrome.fakeJson.funcWithInlineObj = function(inlineObj, callback) {};""" %
(datetime.now().year, sys.argv[0]))
class JsExternGeneratorTest(unittest.TestCase):
def _GetNamespace(self, fake_content, filename, is_idl):
"""Returns a namespace object for the given content"""
api_def = (idl_schema.Process(fake_content, filename) if is_idl
else json_parse.Parse(fake_content))
m = model.Model()
return m.AddNamespace(api_def[0], filename)
def setUp(self):
self.maxDiff = None # Lets us see the full diff when inequal.
def testBasic(self):
namespace = self._GetNamespace(fake_idl, 'fake_api.idl', True)
self.assertMultiLineEqual(expected_output,
JsExternsGenerator().Generate(namespace).Render())
def testJsonWithInlineObjects(self):
namespace = self._GetNamespace(fake_json, 'fake_api.json', False)
self.assertMultiLineEqual(json_expected,
JsExternsGenerator().Generate(namespace).Render())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 581,523,148,486,017,700 | 3,360,781,276,291,711,500 | 26.850794 | 90 | 0.62054 | false |
erccarls/vectorsearch | vectorsearch/word2vec.py | 1 | 4242 | from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import gensim
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
class Word2Vec(gensim.models.Word2Vec):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self._stem_memory = defaultdict(set)
def most_similar(self, words={}, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
words : a dict where the words are the keys and the weights are the values.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
          >>> trained_model.most_similar(words={'woman': 1.0, 'king': 1.0, 'man': -1.0})
[('queen', 0.50882536), ...]
"""
self.init_sims()
# if isinstance(positive, string_types) and not negative:
# # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
# positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
# positive = [
# (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in positive
# ]
# negative = [
# (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in negative
# ]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in words.items():
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
Warning("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
| apache-2.0 | 7,098,282,242,268,549,000 | -2,278,274,256,126,244,600 | 38.654206 | 116 | 0.656294 | false |
CloudBreadPaPa/azure-ml-python-seminar | code/python/ml-Iris.py | 1 | 1412 | import urllib2
# If you are using Python 3+, use urllib.request instead of urllib2
import json
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"],
"Values": [ [ "1", "1", "1", "1", "" ], ]
}, },
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
url = 'https://asiasoutheast.services.azureml.net/workspaces/46d0e60b05b34558827abd41f11d204f/services/acac88a083ce443789028306375ddf56/execute?api-version=2.0&details=true'
api_key = '<change here>' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
req = urllib2.Request(url, body, headers)
try:
response = urllib2.urlopen(req)
# If you are using Python 3+, replace urllib2 with urllib.request in the above code:
# req = urllib.request.Request(url, body, headers)
# response = urllib.request.urlopen(req)
result = response.read()
print(result)
except urllib2.HTTPError, error:
print("The request failed with status code: " + str(error.code))
# Print the headers - they include the requert ID and the timestamp, which are useful for debugging the failure
print(error.info())
print(json.loads(error.read()))
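# Illustrative only (not part of the original sample): one way to dig the scored
# values out of the response. The key names ("Results", "output1", "value",
# "Values") are assumed from the classic Azure ML request/response format and
# may differ for other service versions.
#
#     parsed = json.loads(result)
#     print(parsed["Results"]["output1"]["value"]["Values"])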
| mit | 4,770,004,585,978,547,000 | 293,869,936,456,870,700 | 30.377778 | 173 | 0.626771 | false |
kaiix/depot_tools | tests/trychange_unittest.py | 43 | 6250 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for trychange.py."""
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.super_mox import SuperMoxTestBase
import subprocess2
import trychange
class TryChangeTestsBase(SuperMoxTestBase):
"""Setups and tear downs the mocks but doesn't test anything as-is."""
def setUp(self):
SuperMoxTestBase.setUp(self)
self.mox.StubOutWithMock(subprocess2, 'communicate')
self.mox.StubOutWithMock(trychange, 'RunGit')
self.mox.StubOutWithMock(trychange.scm.GIT, 'Capture')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GenerateDiff')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetCheckoutRoot')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetEmail')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetPatchName')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetUpstreamBranch')
self.mox.StubOutWithMock(trychange.scm.SVN, 'GenerateDiff')
self.mox.StubOutWithMock(trychange.scm.SVN, 'GetCheckoutRoot')
self.mox.StubOutWithMock(trychange.scm.SVN, 'GetEmail')
self.fake_root = self.Dir()
self.expected_files = ['foo.txt', 'bar.txt']
self.options = trychange.optparse.Values()
self.options.files = self.expected_files
self.options.diff = None
self.options.name = None
self.options.email = None
self.options.exclude = []
class TryChangeUnittest(TryChangeTestsBase):
"""General trychange.py tests."""
def testMembersChanged(self):
members = [
'DieWithError', 'EPILOG', 'Escape', 'GIT', 'GIT_PATCH_DIR_BASENAME',
'GetMungedDiff', 'GuessVCS', 'GIT_BRANCH_FILE',
'HELP_STRING', 'Error', 'InvalidScript', 'NoTryServerAccess',
'OptionParser', 'PrintSuccess',
'RunCommand', 'RunGit', 'SCM', 'SVN', 'TryChange', 'USAGE', 'contextlib',
'breakpad',
'datetime', 'errno', 'fix_encoding', 'gcl', 'gclient_utils',
'gerrit_util', 'gen_parser',
'getpass', 'itertools', 'json', 'logging', 'optparse', 'os', 'posixpath',
're', 'scm', 'shutil', 'subprocess2', 'sys', 'tempfile', 'urllib',
'urllib2', 'urlparse']
# If this test fails, you should add the relevant test.
self.compareMembers(trychange, members)
class TryChangeSimpleTest(unittest.TestCase):
# Doesn't require supermox to run.
def test_flags(self):
cmd = [
'--bot', 'bot1,bot2',
'--testfilter', 'test1',
'--testfilter', 'test2',
'--user', 'joe',
'--email', '[email protected]',
]
options, args = trychange.gen_parser(None).parse_args(cmd)
self.assertEquals([], args)
# pylint: disable=W0212
bot_spec = trychange._ParseBotList(options.bot, options.testfilter)
if options.testfilter:
bot_spec = trychange._ApplyTestFilter(options.testfilter, bot_spec)
values = trychange._ParseSendChangeOptions(bot_spec, options)
self.assertEquals(
[
('user', 'joe'),
('name', None),
('email', '[email protected]'),
('bot', 'bot1:test1,test2'),
('bot', 'bot2:test1,test2'),
],
values)
def test_flags_bad_combination(self):
cmd = [
'--bot', 'bot1:test1',
'--testfilter', 'test2',
]
options, args = trychange.gen_parser(None).parse_args(cmd)
self.assertEquals([], args)
try:
# pylint: disable=W0212
trychange._ParseBotList(options.bot, options.testfilter)
self.fail()
except ValueError:
pass
class SVNUnittest(TryChangeTestsBase):
"""trychange.SVN tests."""
def testMembersChanged(self):
members = [
'AutomagicalSettings', 'CaptureStatus', 'GetCodeReviewSetting',
'ReadRootFile', 'GenerateDiff', 'GetFileNames', 'files', 'file_tuples',
]
# If this test fails, you should add the relevant test.
self.compareMembers(trychange.SVN, members)
def testBasic(self):
# pylint: disable=E1103
trychange.os.path.abspath(self.fake_root).AndReturn(self.fake_root)
trychange.scm.SVN.GetCheckoutRoot(self.fake_root).AndReturn(self.fake_root)
trychange.scm.SVN.GenerateDiff(['foo.txt', 'bar.txt'],
self.fake_root,
full_move=True,
revision=None).AndReturn('A diff')
trychange.scm.SVN.GetEmail(self.fake_root).AndReturn('[email protected]')
self.mox.ReplayAll()
svn = trychange.SVN(self.options, self.fake_root, self.options.files)
self.assertEqual(svn.GetFileNames(), self.expected_files)
self.assertEqual(svn.checkout_root, self.fake_root)
self.assertEqual(svn.GenerateDiff(), 'A diff')
class GITUnittest(TryChangeTestsBase):
"""trychange.GIT tests."""
def testMembersChanged(self):
members = [
'AutomagicalSettings', 'CaptureStatus', 'GetCodeReviewSetting',
'ReadRootFile', 'GenerateDiff', 'GetFileNames', 'files', 'file_tuples',
]
# If this test fails, you should add the relevant test.
self.compareMembers(trychange.GIT, members)
def testBasic(self):
# pylint: disable=E1103
trychange.os.path.abspath(self.fake_root).AndReturn(self.fake_root)
trychange.scm.GIT.GetCheckoutRoot(self.fake_root).AndReturn(self.fake_root)
trychange.scm.GIT.GetUpstreamBranch(self.fake_root).AndReturn('somewhere')
trychange.RunGit(['diff-index', 'HEAD'])
trychange.scm.GIT.GenerateDiff(self.fake_root,
full_move=True,
files=['foo.txt', 'bar.txt'],
branch='somewhere').AndReturn('A diff')
trychange.scm.GIT.GetPatchName(self.fake_root).AndReturn('bleh-1233')
trychange.scm.GIT.GetEmail(self.fake_root).AndReturn('[email protected]')
self.mox.ReplayAll()
git = trychange.GIT(self.options, self.fake_root, self.options.files)
self.assertEqual(git.GetFileNames(), self.expected_files)
self.assertEqual(git.checkout_root, self.fake_root)
self.assertEqual(git.GenerateDiff(), 'A diff')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 8,931,169,306,976,167,000 | 4,436,458,524,795,717,000 | 37.580247 | 79 | 0.66 | false |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/contrib/syndication/views.py | 113 | 8515 | from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils import six
from django.utils.timezone import is_naive
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
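# Illustrative only (not part of Django): what add_domain() produces.
#
#     add_domain('example.com', '/feeds/latest/')               # 'http://example.com/feeds/latest/'
#     add_domain('example.com', '/feeds/latest/', secure=True)  # 'https://example.com/feeds/latest/'
#     add_domain('example.com', '//cdn.example.com/feed.xml')   # 'http://cdn.example.com/feed.xml'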
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.mime_type)
if hasattr(self, 'item_pubdate'):
# if item_pubdate is defined for the feed, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
        ``self.description_template`` or ``self.title_template`` are used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE,
feed_url = add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, context))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, context))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_text(enc_url),
length = smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink = self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
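# Illustrative only (not part of Django): a minimal sketch of how Feed is meant to
# be subclassed; ``NewsItem`` is an assumed example model.
#
#     class LatestNewsFeed(Feed):
#         title = "Site news"
#         link = "/news/"
#         description = "Updates on changes and additions to the site."
#
#         def items(self):
#             return NewsItem.objects.order_by('-pub_date')[:5]
#
#         def item_title(self, item):
#             return item.title
#
#         def item_description(self, item):
#             return item.summary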
| apache-2.0 | -51,741,703,911,563,500 | 1,885,517,174,655,238,700 | 39.547619 | 167 | 0.579213 | false |
andyfaff/scipy | scipy/sparse/csgraph/tests/test_shortest_path.py | 17 | 12026 | import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import raises as assert_raises
from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
bellman_ford, construct_dist_matrix,
NegativeCycleError)
import scipy.sparse
import pytest
directed_G = np.array([[0, 3, 3, 0, 0],
[0, 0, 0, 2, 4],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[2, 0, 0, 2, 0]], dtype=float)
undirected_G = np.array([[0, 3, 3, 1, 2],
[3, 0, 0, 2, 4],
[3, 0, 0, 0, 0],
[1, 2, 0, 0, 2],
[2, 4, 0, 2, 0]], dtype=float)
unweighted_G = (directed_G > 0).astype(float)
directed_SP = [[0, 3, 3, 5, 7],
[3, 0, 6, 2, 4],
[np.inf, np.inf, 0, np.inf, np.inf],
[1, 4, 4, 0, 8],
[2, 5, 5, 2, 0]]
directed_sparse_zero_G = scipy.sparse.csr_matrix(([0, 1, 2, 3, 1],
([0, 1, 2, 3, 4],
[1, 2, 0, 4, 3])),
shape = (5, 5))
directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
[3, 0, 1, np.inf, np.inf],
[2, 2, 0, np.inf, np.inf],
[np.inf, np.inf, np.inf, 0, 3],
[np.inf, np.inf, np.inf, 1, 0]]
undirected_sparse_zero_G = scipy.sparse.csr_matrix(([0, 0, 1, 1, 2, 2, 1, 1],
([0, 1, 1, 2, 2, 0, 3, 4],
[1, 0, 2, 1, 0, 2, 4, 3])),
shape = (5, 5))
undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
[0, 0, 1, np.inf, np.inf],
[1, 1, 0, np.inf, np.inf],
[np.inf, np.inf, np.inf, 0, 1],
[np.inf, np.inf, np.inf, 1, 0]]
directed_pred = np.array([[-9999, 0, 0, 1, 1],
[3, -9999, 0, 1, 1],
[-9999, -9999, -9999, -9999, -9999],
[3, 0, 0, -9999, 1],
[4, 0, 0, 4, -9999]], dtype=float)
undirected_SP = np.array([[0, 3, 3, 1, 2],
[3, 0, 6, 2, 4],
[3, 6, 0, 4, 5],
[1, 2, 4, 0, 2],
[2, 4, 5, 2, 0]], dtype=float)
undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
[np.inf, 0, np.inf, 2, np.inf],
[np.inf, np.inf, 0, np.inf, np.inf],
[1, 2, np.inf, 0, 2],
[2, np.inf, np.inf, 2, 0]], dtype=float)
undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf
undirected_pred = np.array([[-9999, 0, 0, 0, 0],
[1, -9999, 0, 1, 1],
[2, 0, -9999, 0, 0],
[3, 3, 0, -9999, 3],
[4, 4, 0, 4, -9999]], dtype=float)
methods = ['auto', 'FW', 'D', 'BF', 'J']
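# Method codes used throughout these tests: 'FW' = Floyd-Warshall, 'D' = Dijkstra,
# 'BF' = Bellman-Ford, 'J' = Johnson; 'auto' lets shortest_path pick a method
# based on the input.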
def test_dijkstra_limit():
limits = [0, 2, np.inf]
results = [undirected_SP_limit_0,
undirected_SP_limit_2,
undirected_SP]
def check(limit, result):
SP = dijkstra(undirected_G, directed=False, limit=limit)
assert_array_almost_equal(SP, result)
for limit, result in zip(limits, results):
check(limit, result)
def test_directed():
def check(method):
SP = shortest_path(directed_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP, directed_SP)
for method in methods:
check(method)
def test_undirected():
def check(method, directed_in):
if directed_in:
SP1 = shortest_path(directed_G, method=method, directed=False,
overwrite=False)
assert_array_almost_equal(SP1, undirected_SP)
else:
SP2 = shortest_path(undirected_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP2, undirected_SP)
for method in methods:
for directed_in in (True, False):
check(method, directed_in)
def test_directed_sparse_zero():
# test directed sparse graph with zero-weight edge and two connected components
def check(method):
SP = shortest_path(directed_sparse_zero_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP, directed_sparse_zero_SP)
for method in methods:
check(method)
def test_undirected_sparse_zero():
def check(method, directed_in):
if directed_in:
SP1 = shortest_path(directed_sparse_zero_G, method=method, directed=False,
overwrite=False)
assert_array_almost_equal(SP1, undirected_sparse_zero_SP)
else:
SP2 = shortest_path(undirected_sparse_zero_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP2, undirected_sparse_zero_SP)
for method in methods:
for directed_in in (True, False):
check(method, directed_in)
@pytest.mark.parametrize('directed, SP_ans',
((True, directed_SP),
(False, undirected_SP)))
@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0]))
def test_dijkstra_indices_min_only(directed, SP_ans, indices):
SP_ans = np.array(SP_ans)
indices = np.array(indices, dtype=np.int64)
min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)]
min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype)
for k in range(SP_ans.shape[0]):
min_d_ans[k] = SP_ans[min_ind_ans[k], k]
min_ind_ans[np.isinf(min_d_ans)] = -9999
SP, pred, sources = dijkstra(directed_G,
directed=directed,
indices=indices,
min_only=True,
return_predecessors=True)
assert_array_almost_equal(SP, min_d_ans)
assert_array_equal(min_ind_ans, sources)
SP = dijkstra(directed_G,
directed=directed,
indices=indices,
min_only=True,
return_predecessors=False)
assert_array_almost_equal(SP, min_d_ans)
@pytest.mark.parametrize('n', (10, 100, 1000))
def test_shortest_path_min_only_random(n):
np.random.seed(1234)
data = scipy.sparse.rand(n, n, density=0.5, format='lil',
random_state=42, dtype=np.float64)
data.setdiag(np.zeros(n, dtype=np.bool_))
# choose some random vertices
v = np.arange(n)
np.random.shuffle(v)
indices = v[:int(n*.1)]
ds, pred, sources = dijkstra(data,
directed=False,
indices=indices,
min_only=True,
return_predecessors=True)
for k in range(n):
p = pred[k]
s = sources[k]
while(p != -9999):
assert(sources[p] == s)
p = pred[p]
def test_shortest_path_indices():
indices = np.arange(4)
def check(func, indshape):
outshape = indshape + (5,)
SP = func(directed_G, directed=False,
indices=indices.reshape(indshape))
assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape))
for indshape in [(4,), (4, 1), (2, 2)]:
for func in (dijkstra, bellman_ford, johnson, shortest_path):
check(func, indshape)
assert_raises(ValueError, shortest_path, directed_G, method='FW',
indices=indices)
def test_predecessors():
SP_res = {True: directed_SP,
False: undirected_SP}
pred_res = {True: directed_pred,
False: undirected_pred}
def check(method, directed):
SP, pred = shortest_path(directed_G, method, directed=directed,
overwrite=False,
return_predecessors=True)
assert_array_almost_equal(SP, SP_res[directed])
assert_array_almost_equal(pred, pred_res[directed])
for method in methods:
for directed in (True, False):
check(method, directed)
def test_construct_shortest_path():
def check(method, directed):
SP1, pred = shortest_path(directed_G,
directed=directed,
overwrite=False,
return_predecessors=True)
SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
assert_array_almost_equal(SP1, SP2)
for method in methods:
for directed in (True, False):
check(method, directed)
def test_unweighted_path():
def check(method, directed):
SP1 = shortest_path(directed_G,
directed=directed,
overwrite=False,
unweighted=True)
SP2 = shortest_path(unweighted_G,
directed=directed,
overwrite=False,
unweighted=False)
assert_array_almost_equal(SP1, SP2)
for method in methods:
for directed in (True, False):
check(method, directed)
def test_negative_cycles():
# create a small graph with a negative cycle
graph = np.ones([5, 5])
graph.flat[::6] = 0
graph[1, 2] = -2
def check(method, directed):
assert_raises(NegativeCycleError, shortest_path, graph, method,
directed)
for method in ['FW', 'J', 'BF']:
for directed in (True, False):
check(method, directed)
def test_masked_input():
np.ma.masked_equal(directed_G, 0)
def check(method):
SP = shortest_path(directed_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP, directed_SP)
for method in methods:
check(method)
def test_overwrite():
G = np.array([[0, 3, 3, 1, 2],
[3, 0, 0, 2, 4],
[3, 0, 0, 0, 0],
[1, 2, 0, 0, 2],
[2, 4, 0, 2, 0]], dtype=float)
foo = G.copy()
shortest_path(foo, overwrite=False)
assert_array_equal(foo, G)
@pytest.mark.parametrize('method', methods)
def test_buffer(method):
# Smoke test that sparse matrices with read-only buffers (e.g., those from
# joblib workers) do not cause::
#
# ValueError: buffer source array is read-only
#
G = scipy.sparse.csr_matrix([[1.]])
G.data.flags['WRITEABLE'] = False
shortest_path(G, method=method)
def test_NaN_warnings():
with pytest.warns(None) as record:
shortest_path(np.array([[0, 1], [np.nan, 0]]))
for r in record:
assert r.category is not RuntimeWarning
def test_sparse_matrices():
# Test that using lil,csr and csc sparse matrix do not cause error
G_dense = np.array([[0, 3, 0, 0, 0],
[0, 0, -1, 0, 0],
[0, 0, 0, 2, 0],
[0, 0, 0, 0, 4],
[0, 0, 0, 0, 0]], dtype=float)
SP = shortest_path(G_dense)
G_csr = scipy.sparse.csr_matrix(G_dense)
G_csc = scipy.sparse.csc_matrix(G_dense)
G_lil = scipy.sparse.lil_matrix(G_dense)
assert_array_almost_equal(SP, shortest_path(G_csr))
assert_array_almost_equal(SP, shortest_path(G_csc))
assert_array_almost_equal(SP, shortest_path(G_lil))
| bsd-3-clause | -3,998,308,802,815,234,000 | -617,159,645,677,077,900 | 35.005988 | 87 | 0.494346 | false |
pieterlexis/pdns | build-scripts/cherry-pick-pr.py | 4 | 1840 | #!/usr/bin/env python3
import requests
import sys
import subprocess
import argparse
def get_commits(pr):
try:
res = requests.get('https://api.github.com/repos/PowerDNS/pdns/pulls/'
'{}/commits'.format(pr)).json()
return [c['sha'] for c in res]
except (ValueError, requests.exceptions.HTTPError) as e:
print(e)
sys.exit(1)
def run_command(cmd):
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
print(e)
sys.exit(1)
a = argparse.ArgumentParser()
action = a.add_mutually_exclusive_group(required=True)
action.add_argument(
'-b', '--backport-unto', metavar='REF', nargs=1, help='Backport, using '
'cherry-pick, all commits from PULL_REQUEST onto REF. This is done on a '
'branch called "backport-PULL_REQUEST". When the cherry-pick fails, solve '
'the conflict as usual and run "git cherry-pick --continue --allow-empty"')
action.add_argument(
'-m', '--merge-into', metavar='REF', nargs=1, help='Take the backport-'
'PULL_REQUEST branch and merge it into REF')
a.add_argument(
'pull_request', metavar='PULL_REQUEST', type=int,
help='The PR number to backport')
args = a.parse_args()
if args.backport_unto:
command = ['git', 'checkout', '-b',
'backport-{}'.format(args.pull_request), args.backport_unto[0]]
run_command(command)
commits = get_commits(args.pull_request)
command = ['git', 'cherry-pick', '-x', '--allow-empty'] + commits
run_command(command)
if args.merge_into:
command = ['git', 'checkout', args.merge_into[0]]
run_command(command)
command = ['git', 'merge', '--no-ff',
'backport-{}'.format(args.pull_request), '-m',
'Backport #{}'.format(args.pull_request)]
run_command(command)
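# Illustrative invocations (editor addition; the branch name and PR number are
# hypothetical, not taken from this repository's history):
#
#   ./cherry-pick-pr.py --backport-unto rel/some-branch 1234
#   ./cherry-pick-pr.py --merge-into rel/some-branch 1234
#
# The first call cherry-picks every commit of PR 1234 onto a new branch named
# "backport-1234"; the second merges that branch back with --no-ff.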
| gpl-2.0 | 8,114,416,791,546,906,000 | 5,056,247,851,405,247,000 | 30.186441 | 79 | 0.628804 | false |
schwehr/gdal-autotest2 | python/ogr/georss_test.py | 1 | 15293 | # MOE:insert #!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test OGR handling of GeoRSS files.
This is a rewrite of:
https://trac.osgeo.org/gdal/browser/trunk/autotest/ogr/ogr_georss.py
"""
import json
import os
import sys
import unittest
import google3
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
from autotest2.gcore import gcore_util
from autotest2.ogr import ogr_util
DRIVER = ogr_util.GEORSS_DRIVER
EXT = '.xml'
DEFAULT_LAYER_NAME = 'OGRGeoRSS'
# Values used in some of the atom tests.
ATOM_FIELD_VALUES = [
('title', 'Atom draft-07 snapshot',
ogr.OFTString), ('link_rel', 'alternate',
ogr.OFTString), ('link_type', 'text/html', ogr.OFTString),
('link_href', 'http://example.org/2005/04/02/atom',
ogr.OFTString), ('link2_rel', 'enclosure',
ogr.OFTString), ('link2_type', 'audio/mpeg',
ogr.OFTString), ('link2_length', '1337',
ogr.OFTInteger),
('link2_href', 'http://example.org/audio/ph34r_my_podcast.mp3',
ogr.OFTString), ('id', 'tag:example.org,2003:3.2397',
ogr.OFTString), ('updated', '2005/07/31 12:29:29+00',
ogr.OFTDateTime),
('published', '2003/12/13 08:29:29-04',
ogr.OFTDateTime), ('author_name', 'Mark Pilgrim',
ogr.OFTString), ('author_uri', 'http://example.org/',
ogr.OFTString),
('author_email', '[email protected]',
ogr.OFTString), ('contributor_name', 'Sam Ruby',
ogr.OFTString), ('contributor2_name', 'Joe Gregorio',
ogr.OFTString), ('content_type', 'xhtml',
ogr.OFTString),
('content_xml_lang', 'en',
ogr.OFTString), ('content_xml_base', 'http://diveintomark.org/',
ogr.OFTString)
]
def setUpModule():
ogr_util.SetupTestEnv()
def CreateField(layer, name, field_type=ogr.OFTString):
field_definition = ogr.FieldDefn(name, field_type)
layer.CreateField(field_definition)
field_definition.Destroy()
@ogr_util.SkipIfDriverMissing(DRIVER)
class OgrGeoRSSTest(ogr_util.DriverTestCase):
def setUp(self):
super(OgrGeoRSSTest, self).setUp(DRIVER, EXT)
# Helper for GeoRSS tests. Used by GeoRss1x.
def ogrGeoRssTestAtom(self, ogr_filepath):
ds = self.CheckOpen(ogr_filepath)
lyr = ds.GetLayerByIndex(0)
self.assertIsNone(lyr.GetSpatialRef())
feat = lyr.GetNextFeature()
for field_value in ATOM_FIELD_VALUES:
self.assertEquals(feat.GetFieldAsString(field_value[0]), field_value[1])
self.assertIn('<div xmlns="http://www.w3.org/1999/xhtml">',
feat.GetFieldAsString('content'))
# Helper for GeoRSS tests. Used by GeoRss2~9.
def ogrGeoRssTest(self, ogr_filepath, only_first_feature):
ds = self.CheckOpen(ogr_filepath)
lyr = ds.GetLayerByIndex(0)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
self.assertIsNotNone(lyr.GetSpatialRef())
self.assertTrue(lyr.GetSpatialRef().IsSame(srs))
self.assertNotIn('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]',
lyr.GetSpatialRef().ExportToWkt())
feat = lyr.GetNextFeature()
expected_wkt = 'POINT (2 49)'
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A point')
self.assertEquals(feat.GetFieldAsString('author'), 'Author')
self.assertEquals(feat.GetFieldAsString('link'), 'http://gdal.org')
self.assertEquals(
feat.GetFieldAsString('pubDate'), '2008/12/07 20:13:00+02')
self.assertEquals(feat.GetFieldAsString('category'), 'First category')
self.assertEquals(feat.GetFieldAsString('category_domain'), 'first_domain')
self.assertEquals(feat.GetFieldAsString('category2'), 'Second category')
self.assertEquals(
feat.GetFieldAsString('category2_domain'), 'second_domain')
feat = lyr.GetNextFeature()
expected_wkt = 'LINESTRING (2 48,2.1 48.1,2.2 48.0)'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A line')
feat = lyr.GetNextFeature()
expected_wkt = 'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A polygon')
feat = lyr.GetNextFeature()
expected_wkt = 'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A box')
  # Creates an RSS 2.0 document
def ogrGeoRssCreate(self, ogr_filepath, options):
ds = self.driver.CreateDataSource(ogr_filepath, options=options)
lyr = ds.CreateLayer('georss')
lyr.CreateField(ogr.FieldDefn('title', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('author', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('link', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('pubDate', ogr.OFTDateTime))
lyr.CreateField(ogr.FieldDefn('description', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category_domain', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category2', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category2_domain', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A point')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetField('category', 'First category')
dst_feat.SetField('category_domain', 'first_domain')
dst_feat.SetField('category2', 'Second category')
dst_feat.SetField('category2_domain', 'second_domain')
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A line')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt('LINESTRING (2 48,2.1 48.1,2.2 48.0)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A polygon')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt(
'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A box')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt(
'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
ds = None
def testOgrGeorss1(self):
filepath = ogr_util.GetTestFilePath('georss/atom_rfc_sample.xml')
self.ogrGeoRssTestAtom(filepath)
def testOgrGeorss1AtomNs(self):
filepath = ogr_util.GetTestFilePath('georss/atom_rfc_sample_atom_ns.xml')
self.ogrGeoRssTestAtom(filepath)
def testOgrGeorss1bis(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test_atom.xml')
ds = self.driver.CreateDataSource(filepath, options=['FORMAT=ATOM'])
lyr = ds.CreateLayer('georss')
for field_value in ATOM_FIELD_VALUES:
lyr.CreateField(ogr.FieldDefn(field_value[0], field_value[2]))
lyr.CreateField(ogr.FieldDefn('content', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
for field_value in ATOM_FIELD_VALUES:
dst_feat.SetField(field_value[0], field_value[1])
dst_feat.SetField(
'content', '<div xmlns="http://www.w3.org/1999/xhtml">'
'<p><i>[Update: The Atom draft is finished.]</i></p></div>')
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
def testOgrGeorss1ter(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test_atom.xml')
self.ogrGeoRssTestAtom(filepath)
  # Test reading an RSS 2.0 document with GeoRSS simple geometries
def testOgrGeorss2(self):
filepath = ogr_util.GetTestFilePath('georss/test_georss_simple.xml')
self.ogrGeoRssTest(filepath, False)
  # Test reading an RSS 2.0 document with GeoRSS GML geometries
def testOgrGeorss3(self):
filepath = ogr_util.GetTestFilePath('georss/test_georss_gml.xml')
self.ogrGeoRssTest(filepath, False)
  # Test writing an RSS 2.0 document in Simple dialect
# (doesn't need read support)
def testOgrGeorss4and5(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_4.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, [])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 5.
self.ogrGeoRssTest(filepath, False)
  # Test writing an RSS 2.0 document in GML dialect
# (doesn't need read support)
def testOgrGeorss6and7(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_6.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, ['GEOM_DIALECT=GML'])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 7.
self.ogrGeoRssTest(filepath, False)
  # Test writing an RSS 2.0 document in W3C Geo dialect
# (doesn't need read support)
def testOgrGeorss8and9(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_8.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, ['GEOM_DIALECT=W3C_GEO'])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 9.
self.ogrGeoRssTest(filepath, True)
  # Test writing an RSS 2.0 document in GML dialect with EPSG:32631
def testOgrGeorss10and11(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test32631.rss')
with gcore_util.GdalUnlinkWhenDone(filepath):
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
ds = self.driver.CreateDataSource(filepath)
with gcore_util.GdalUnlinkWhenDone(filepath):
with gcore_util.ErrorHandler('CPLQuietErrorHandler'):
lyr = ds.CreateLayer('georss', srs=srs)
self.assertIsNone(lyr)
ds = self.driver.CreateDataSource(filepath, options=['GEOM_DIALECT=GML'])
lyr = ds.CreateLayer('georss', srs=srs)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (500000 4000000)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
# Close the files and force a flush to the filesystem.
lyr = None
ds = None
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 11.
ds = self.CheckOpen(filepath)
lyr = ds.GetLayer(0)
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
self.assertIsNotNone(lyr.GetSpatialRef())
self.assertTrue(lyr.GetSpatialRef().IsSame(srs))
self.assertIn('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]',
lyr.GetSpatialRef().ExportToWkt())
feat = lyr.GetNextFeature()
expected_wkt = 'POINT (500000 4000000)'
self.assertEqual(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
# TODO(b/71817518): ogr_georss_12
def testOgrGeorss13and14(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test32631.rss')
with gcore_util.GdalUnlinkWhenDone(filepath):
ds = self.driver.CreateDataSource(
filepath, options=['USE_EXTENSIONS=YES'])
lyr = ds.CreateLayer('georss')
lyr.CreateField(ogr.FieldDefn('myns_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('field2', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('ogr_field3', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('myns_field', 'val')
dst_feat.SetField('field2', 'val2')
dst_feat.SetField('ogr_field3', 'val3')
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
ds = None
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 14.
ds = self.CheckOpen(filepath)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
self.assertEquals(feat.GetFieldAsString('myns_field'), 'val')
self.assertEquals(feat.GetFieldAsString('ogr_field2'), 'val2')
self.assertEquals(feat.GetFieldAsString('ogr_field3'), 'val3')
# ogr_georss_15 redundant as all temp files were tested with in memory file.
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,637,688,136,443,576,000 | -632,995,407,081,462,300 | 37.716456 | 81 | 0.678153 | false |
marc-sensenich/ansible | lib/ansible/module_utils/network/ftd/common.py | 22 | 6027 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.common.collections import is_string
INVALID_IDENTIFIER_SYMBOLS = r'[^a-zA-Z0-9_]'
IDENTITY_PROPERTIES = ['id', 'version', 'ruleId']
NON_COMPARABLE_PROPERTIES = IDENTITY_PROPERTIES + ['isSystemDefined', 'links']
class HTTPMethod:
GET = 'get'
POST = 'post'
PUT = 'put'
DELETE = 'delete'
class ResponseParams:
SUCCESS = 'success'
STATUS_CODE = 'status_code'
RESPONSE = 'response'
class FtdConfigurationError(Exception):
def __init__(self, msg, obj=None):
super(FtdConfigurationError, self).__init__(msg)
self.msg = msg
self.obj = obj
class FtdServerError(Exception):
def __init__(self, response, code):
super(FtdServerError, self).__init__(response)
self.response = response
self.code = code
class FtdUnexpectedResponse(Exception):
"""The exception to be raised in case of unexpected responses from 3d parties."""
pass
def construct_ansible_facts(response, params):
facts = dict()
if response:
response_body = response['items'] if 'items' in response else response
if params.get('register_as'):
facts[params['register_as']] = response_body
elif 'name' in response_body and 'type' in response_body:
object_name = re.sub(INVALID_IDENTIFIER_SYMBOLS, '_', response_body['name'].lower())
fact_name = '%s_%s' % (response_body['type'], object_name)
facts[fact_name] = response_body
return facts
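def _construct_ansible_facts_example():
    """Illustrative sketch (editor addition, not part of the original module):
    without 'register_as', facts are keyed as '<type>_<sanitized lowercase name>'."""
    response = {'name': 'Any IPv4', 'type': 'networkobject', 'id': 'abc'}
    # Returns {'networkobject_any_ipv4': response}
    return construct_ansible_facts(response, {})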
def copy_identity_properties(source_obj, dest_obj):
for property_name in IDENTITY_PROPERTIES:
if property_name in source_obj:
dest_obj[property_name] = source_obj[property_name]
return dest_obj
def is_object_ref(d):
"""
Checks if a dictionary is a reference object. The dictionary is considered to be a
reference object when it contains non-empty 'id' and 'type' fields.
:type d: dict
:return: True if passed dictionary is a reference object, otherwise False
"""
has_id = 'id' in d.keys() and d['id']
has_type = 'type' in d.keys() and d['type']
return has_id and has_type
def equal_object_refs(d1, d2):
"""
Checks whether two references point to the same object.
:type d1: dict
:type d2: dict
:return: True if passed references point to the same object, otherwise False
"""
have_equal_ids = d1['id'] == d2['id']
have_equal_types = d1['type'] == d2['type']
return have_equal_ids and have_equal_types
def equal_lists(l1, l2):
"""
Checks whether two lists are equal. The order of elements in the arrays is important.
:type l1: list
:type l2: list
:return: True if passed lists, their elements and order of elements are equal. Otherwise, returns False.
"""
if len(l1) != len(l2):
return False
for v1, v2 in zip(l1, l2):
if not equal_values(v1, v2):
return False
return True
def equal_dicts(d1, d2, compare_by_reference=True):
"""
Checks whether two dictionaries are equal. If `compare_by_reference` is set to True, dictionaries referencing
objects are compared using `equal_object_refs` method. Otherwise, every key and value is checked.
:type d1: dict
:type d2: dict
:param compare_by_reference: if True, dictionaries referencing objects are compared using `equal_object_refs` method
:return: True if passed dicts are equal. Otherwise, returns False.
"""
if compare_by_reference and is_object_ref(d1) and is_object_ref(d2):
return equal_object_refs(d1, d2)
if len(d1) != len(d2):
return False
for key, v1 in d1.items():
if key not in d2:
return False
v2 = d2[key]
if not equal_values(v1, v2):
return False
return True
def equal_values(v1, v2):
"""
Checks whether types and content of two values are the same. In case of complex objects, the method might be
called recursively.
:param v1: first value
:param v2: second value
:return: True if types and content of passed values are equal. Otherwise, returns False.
:rtype: bool
"""
# string-like values might have same text but different types, so checking them separately
if is_string(v1) and is_string(v2):
return to_text(v1) == to_text(v2)
if type(v1) != type(v2):
return False
value_type = type(v1)
if value_type == list:
return equal_lists(v1, v2)
elif value_type == dict:
return equal_dicts(v1, v2)
else:
return v1 == v2
def equal_objects(d1, d2):
"""
Checks whether two objects are equal. Ignores special object properties (e.g. 'id', 'version') and
    properties with None or empty values. If a property contains a reference to another object,
    only the object identities (ids and types) are checked.
:type d1: dict
:type d2: dict
:return: True if passed objects and their properties are equal. Otherwise, returns False.
"""
d1 = dict((k, d1[k]) for k in d1.keys() if k not in NON_COMPARABLE_PROPERTIES and d1[k])
d2 = dict((k, d2[k]) for k in d2.keys() if k not in NON_COMPARABLE_PROPERTIES and d2[k])
return equal_dicts(d1, d2, compare_by_reference=False)
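def _equal_objects_example():
    """Illustrative sketch (editor addition, not part of the original module):
    identity properties such as 'id' and 'version' are ignored by equal_objects."""
    a = {'id': '1', 'version': 'A', 'name': 'net', 'type': 'network'}
    b = {'id': '2', 'version': 'B', 'name': 'net', 'type': 'network'}
    return equal_objects(a, b)  # True: only 'name' and 'type' are compared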
| gpl-3.0 | 7,718,774,907,056,608,000 | -1,091,534,918,333,002,500 | 30.227979 | 120 | 0.661357 | false |
wisechengyi/pants | src/python/pants/util/collections.py | 1 | 3201 | # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import collections
import collections.abc
from typing import Any, Callable, DefaultDict, Iterable, List, MutableMapping, Type, TypeVar, Union
_K = TypeVar("_K")
_V = TypeVar("_V")
def factory_dict(value_factory: Callable[[_K], _V], *args, **kwargs) -> DefaultDict:
"""A dict whose values are computed by `value_factory` when a `__getitem__` key is missing.
    Note that values retrieved by any other method will not be lazily computed; e.g. via `get`.
    :param value_factory: function used to compute the value for a missing key, given that key.
    :param *args: Any positional args to pass through to `dict`.
    :param **kwargs: Any kwargs to pass through to `dict`.
"""
class FactoryDict(collections.defaultdict):
@staticmethod
def __never_called():
raise AssertionError(
"The default factory should never be called since we override " "__missing__."
)
def __init__(self):
super().__init__(self.__never_called, *args, **kwargs)
def __missing__(self, key):
value = value_factory(key)
self[key] = value
return value
return FactoryDict()
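def _factory_dict_example() -> DefaultDict:
    """Illustrative sketch (editor addition, not part of the original API):
    values are computed lazily on `__getitem__` misses, but not by `get`."""
    squares = factory_dict(lambda k: k * k)
    assert squares[3] == 9  # computed and cached via __missing__
    assert squares.get(4) is None  # .get() does not invoke the factory
    return squares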
def recursively_update(d: MutableMapping, d2: MutableMapping) -> None:
"""dict.update but which merges child dicts (dict2 takes precedence where there's conflict)."""
for k, v in d2.items():
if k in d:
if isinstance(v, dict):
recursively_update(d[k], v)
continue
d[k] = v
_T = TypeVar("_T")
def assert_single_element(iterable: Iterable[_T]) -> _T:
"""Get the single element of `iterable`, or raise an error.
:raise: :class:`StopIteration` if there is no element.
:raise: :class:`ValueError` if there is more than one element.
"""
it = iter(iterable)
first_item = next(it)
try:
next(it)
except StopIteration:
return first_item
raise ValueError(f"iterable {iterable!r} has more than one element.")
def ensure_list(val: Union[Any, Iterable[Any]], *, expected_type: Type[_T]) -> List[_T]:
"""Given either a single value or an iterable of values, always return a list.
This performs runtime type checking to ensure that every element of the list is the expected
type.
"""
if isinstance(val, expected_type):
return [val]
if not isinstance(val, collections.abc.Iterable):
raise ValueError(
f"The value {val} (type {type(val)}) did not have the expected type {expected_type} "
"nor was it an iterable."
)
result: List[_T] = []
for i, x in enumerate(val):
if not isinstance(x, expected_type):
raise ValueError(
f"Not all elements of the iterable have type {expected_type}. Encountered the "
f"element {x} of type {type(x)} at index {i}."
)
result.append(x)
return result
def ensure_str_list(val: Union[str, Iterable[str]]) -> List[str]:
"""Given either a single string or an iterable of strings, always return a list."""
return ensure_list(val, expected_type=str)
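def _ensure_list_example() -> List[str]:
    """Illustrative sketch (editor addition): a single value and an iterable of
    values both normalize to a list, with runtime type checking."""
    assert ensure_str_list("foo") == ["foo"]
    assert ensure_str_list(("foo", "bar")) == ["foo", "bar"]
    return ensure_str_list(["baz"])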
| apache-2.0 | -6,391,764,361,599,115,000 | -718,296,405,737,825,800 | 32.34375 | 99 | 0.621993 | false |
imsut/commons | src/python/twitter/common/http/mirror_file.py | 2 | 2677 | import errno
import httplib
import os
import socket
import time
class MirrorFile(object):
def __init__(self, http_host, http_path, local_file, https=False):
"""
Given a file pointed to by 'url', mirror it to 'local_file', providing operations
to check that it's up to date.
"""
self._http_path = http_path
self._http_host = http_host
self._local_filename = local_file
self._connection_class = httplib.HTTPSConnection if https else httplib.HTTPConnection
self._local_mtime = None
self._web_mtime = None
self._exists = os.path.exists(local_file)
def _get_local_timestamp(self):
try:
stat = os.stat(self._local_filename)
return stat.st_mtime
except OSError as e:
if e.errno == errno.ENOENT:
self._local_mtime = None
else:
# File is inaccessible.
raise
return None
def _get_web_timestamp(self):
    # TODO(wickman) Wrap this in an exponential backoff.
conn = self._connection_class(self._http_host)
try:
conn.request('HEAD', self._http_path)
except (httplib.CannotSendRequest, socket.error):
return None
try:
res = conn.getresponse()
except (httplib.ResponseNotReady, httplib.BadStatusLine):
return None
if res is not None:
last_modified = res.getheader('last-modified')
if last_modified is not None:
try:
last_modified = time.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')
except ValueError:
return None
return int(time.mktime(last_modified))
return None
def filename(self):
if not self._exists:
ioe = IOError('%s does not exist' % self._local_filename)
ioe.errno = errno.ENOENT
raise ioe
return self._local_filename
def refresh(self):
"""
Refresh the local file if necessary. Returns truthy if the underlying file changed.
"""
self._local_mtime = self._get_local_timestamp()
self._web_mtime = self._get_web_timestamp()
if self._web_mtime is None:
return None
else:
if self._web_mtime != self._local_mtime:
return self._fetch()
def _fetch(self):
conn = self._connection_class(self._http_host)
try:
conn.request('GET', self._http_path)
except (httplib.CannotSendRequest, socket.error):
return None
try:
res = conn.getresponse()
except (httplib.ResponseNotReady, httplib.BadStatusLine):
return None
if res is not None:
with open(self._local_filename, 'w') as fp:
fp.write(res.read())
os.utime(self._local_filename, (self._web_mtime, self._web_mtime))
self._exists = True
return True
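# Illustrative usage sketch (editor addition; host, path and the handle_update
# callback are hypothetical):
#
#   mirror = MirrorFile('example.com', '/data/feed.json', '/tmp/feed.json')
#   if mirror.refresh():
#       handle_update(mirror.filename())
#
# refresh() compares the remote Last-Modified header against the local mtime
# and re-downloads the file only when the remote copy changed.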
| apache-2.0 | 7,391,587,615,334,487,000 | 7,503,468,052,297,088,000 | 29.078652 | 90 | 0.635039 | false |
uclouvain/osis | base/tests/views/learning_units/external/test_update.py | 1 | 4921 | ############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.contrib.messages import get_messages, SUCCESS
from django.test import TestCase
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from waffle.testutils import override_flag
from base.models.enums.entity_type import FACULTY
from base.models.enums.learning_container_year_types import EXTERNAL
from base.models.enums.organization_type import MAIN
from base.tests.factories.academic_calendar import generate_learning_unit_edition_calendars
from base.tests.factories.academic_year import create_current_academic_year
from base.tests.factories.entity import EntityWithVersionFactory
from base.tests.factories.external_learning_unit_year import ExternalLearningUnitYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFullFactory
from base.tests.factories.person import PersonFactory
from base.tests.forms.test_external_learning_unit import get_valid_external_learning_unit_form_data
from base.views.learning_units.update import update_learning_unit
from learning_unit.tests.factories.central_manager import CentralManagerFactory
@override_flag('learning_unit_update', active=True)
class TestUpdateExternalLearningUnitView(TestCase):
@classmethod
def setUpTestData(cls):
cls.entity = EntityWithVersionFactory(organization__type=MAIN, version__entity_type=FACULTY)
cls.manager = CentralManagerFactory(entity=cls.entity, with_child=True)
cls.person = cls.manager.person
cls.academic_year = create_current_academic_year()
generate_learning_unit_edition_calendars([cls.academic_year])
cls.luy = LearningUnitYearFullFactory(
academic_year=cls.academic_year,
internship_subtype=None,
acronym="EFAC1000",
learning_container_year__container_type=EXTERNAL,
learning_container_year__requirement_entity=cls.entity,
learning_container_year__allocation_entity=cls.entity,
)
cls.data = get_valid_external_learning_unit_form_data(cls.academic_year, cls.luy, cls.entity)
cls.url = reverse(update_learning_unit, args=[cls.luy.pk])
def setUp(self):
self.external = ExternalLearningUnitYearFactory(learning_unit_year=self.luy)
self.client.force_login(self.person.user)
def test_update_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_update_get_permission_denied(self):
self.client.force_login(PersonFactory().user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_update_post(self):
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.level for m in get_messages(response.wsgi_request)]
self.assertEqual(messages, [SUCCESS])
def test_update_message_with_report(self):
self.data['postponement'] = "1"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (with report)."))
def test_update_message_without_report(self):
self.data['postponement'] = "0"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (without report)."))
| agpl-3.0 | -5,749,673,388,269,756,000 | -2,848,052,416,156,928,500 | 47.712871 | 101 | 0.710569 | false |
bastik/youtube-dl | youtube_dl/extractor/eagleplatform.py | 65 | 3468 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
)
class EaglePlatformIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
eagleplatform:(?P<custom_host>[^/]+):|
https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id=
)
(?P<id>\d+)
'''
_TESTS = [{
# http://lenta.ru/news/2015/03/06/navalny/
'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
'md5': '0b7994faa2bd5c0f69a3db6db28d078d',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
}, {
# http://muz-tv.ru/play/7129/
# http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
'url': 'eagleplatform:media.clipyou.ru:12820',
'md5': '6c2ebeab03b739597ce8d86339d5a905',
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'skip': 'Georestricted',
}]
def _handle_error(self, response):
status = int_or_none(response.get('status', 200))
if status != 200:
raise ExtractorError(' '.join(response['errors']), expected=True)
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
response = super(EaglePlatformIE, self)._download_json(url_or_request, video_id, note)
self._handle_error(response)
return response
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')
player_data = self._download_json(
'http://%s/api/player_data?id=%s' % (host, video_id), video_id)
media = player_data['data']['playlist']['viewports'][0]['medialist'][0]
title = media['title']
description = media.get('description')
thumbnail = media.get('snapshot')
duration = int_or_none(media.get('duration'))
view_count = int_or_none(media.get('views'))
age_restriction = media.get('age_restriction')
age_limit = None
if age_restriction:
age_limit = 0 if age_restriction == 'allow_all' else 18
m3u8_data = self._download_json(
media['sources']['secure_m3u8']['auto'],
video_id, 'Downloading m3u8 JSON')
formats = self._extract_m3u8_formats(
m3u8_data['data'][0], video_id,
'mp4', entry_protocol='m3u8_native')
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
'formats': formats,
}
| unlicense | 636,888,592,069,119,000 | -2,521,762,706,642,427,400 | 33.79798 | 121 | 0.534107 | false |
abaldwin1/thumbor | tests/loaders/test_https_loader.py | 2 | 7454 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from os.path import abspath, join, dirname
from preggy import expect
import mock
# from tornado.concurrent import Future
import tornado.web
from tests.base import PythonTestCase, TestCase
from tornado.concurrent import Future
import thumbor.loaders.https_loader as loader
from thumbor.context import Context
from thumbor.config import Config
from thumbor.loaders import LoaderResult
def fixture_for(filename):
return abspath(join(dirname(__file__), 'fixtures', filename))
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello')
class EchoUserAgentHandler(tornado.web.RequestHandler):
def get(self):
self.write(self.request.headers['User-Agent'])
class HandlerMock(object):
def __init__(self, headers):
self.request = RequestMock(headers)
class RequestMock(object):
def __init__(self, headers):
self.headers = headers
class ResponseMock:
def __init__(self, error=None, content_type=None, body=None, code=None):
self.error = error
self.code = code
self.time_info = None
self.headers = {
'Content-Type': 'image/jpeg'
}
if content_type:
self.headers['Content-Type'] = content_type
self.body = body
class ReturnContentTestCase(PythonTestCase):
def test_return_none_on_error(self):
response_mock = ResponseMock(error='Error', code=599)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
def test_return_body_if_valid(self):
response_mock = ResponseMock(body='body', code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('body')
def test_return_upstream_error_on_body_none(self):
response_mock = ResponseMock(body=None, code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
expect(result.error).to_equal(LoaderResult.ERROR_UPSTREAM)
def test_return_upstream_error_on_body_empty(self):
response_mock = ResponseMock(body='', code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
expect(result.error).to_equal(LoaderResult.ERROR_UPSTREAM)
class ValidateUrlTestCase(PythonTestCase):
def test_with_allowed_sources(self):
config = Config()
config.ALLOWED_SOURCES = ['s.glbimg.com']
ctx = Context(None, config, None)
expect(
loader.validate(
ctx,
'http://www.google.com/logo.jpg'
)
).to_be_false()
expect(
loader.validate(
ctx,
'http://s2.glbimg.com/logo.jpg'
)
).to_be_false()
expect(
loader.validate(
ctx,
'/glob=:sfoir%20%20%3Co-pmb%20%20%20%20_%20%20%20%200%20%20g.-%3E%3Ca%20hplass='
)
).to_be_false()
expect(
loader.validate(ctx, 'http://s.glbimg.com/logo.jpg')).to_be_true()
def test_without_allowed_sources(self):
config = Config()
config.ALLOWED_SOURCES = []
ctx = Context(None, config, None)
is_valid = loader.validate(ctx, 'http://www.google.com/logo.jpg')
expect(is_valid).to_be_true()
class NormalizeUrlTestCase(PythonTestCase):
def test_should_normalize_url(self):
expect(loader._normalize_url('http://some.url')).to_equal('http://some.url')
expect(loader._normalize_url('some.url')).to_equal('https://some.url')
def test_should_normalize_quoted_url(self):
url = 'https%3A//www.google.ca/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png'
expected = 'https://www.google.ca/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png'
result = loader._normalize_url(url)
expect(result).to_equal(expected)
class HttpsLoaderTestCase(TestCase):
def get_app(self):
application = tornado.web.Application([
(r"/", MainHandler),
])
return application
def test_load_with_callback(self):
url = self.get_url('/')
config = Config()
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('Hello')
expect(result.successful).to_be_true()
def test_load_with_curl(self):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_CURL_ASYNC_HTTP_CLIENT = True
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('Hello')
expect(result.successful).to_be_true()
def test_should_return_a_future(self):
url = self.get_url('/')
config = Config()
ctx = Context(None, config, None)
future = loader.load(ctx, url)
expect(isinstance(future, Future)).to_be_true()
class HttpLoaderWithUserAgentForwardingTestCase(TestCase):
def get_app(self):
application = tornado.web.Application([
(r"/", EchoUserAgentHandler),
])
return application
def test_load_with_user_agent(self):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_FORWARD_USER_AGENT = True
ctx = Context(None, config, None, HandlerMock({"User-Agent": "test-user-agent"}))
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('test-user-agent')
def test_load_with_default_user_agent(self):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_FORWARD_USER_AGENT = True
config.HTTP_LOADER_DEFAULT_USER_AGENT = "DEFAULT_USER_AGENT"
ctx = Context(None, config, None, HandlerMock({}))
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('DEFAULT_USER_AGENT')
| mit | 2,203,885,717,680,810,500 | -3,340,072,046,957,085,000 | 31.550218 | 102 | 0.626778 | false |
trezor/micropython | ports/nrf/examples/ssd1306_mod.py | 3 | 1591 | # NOTE: Modified version to align with implemented I2C API in nrf port.
#
# Examples usage of SSD1306_SPI on pca10040
#
# from machine import Pin, SPI
# from ssd1306 import SSD1306_SPI
# spi = SPI(0, baudrate=40000000)
# dc = Pin.board.PA11
# res = Pin.board.PA12
# cs = Pin.board.PA13
# disp = SSD1306_SPI(128, 64, spi, dc, res, cs)
#
#
# Example usage of SSD1306_I2C on pca10040
#
# from machine import Pin, I2C
# from ssd1306_mod import SSD1306_I2C_Mod
# i2c = I2C(0, Pin.board.PA3, Pin.board.PA4)
# disp = SSD1306_I2C_Mod(128, 64, i2c)
from ssd1306 import SSD1306_I2C
SET_COL_ADDR = const(0x21)
SET_PAGE_ADDR = const(0x22)
class SSD1306_I2C_Mod(SSD1306_I2C):
def show(self):
x0 = 0
x1 = self.width - 1
if self.width == 64:
# displays with width of 64 pixels are shifted by 32
x0 += 32
x1 += 32
self.write_cmd(SET_COL_ADDR)
self.write_cmd(x0)
self.write_cmd(x1)
self.write_cmd(SET_PAGE_ADDR)
self.write_cmd(0)
self.write_cmd(self.pages - 1)
chunk_size = 254 # 255, excluding opcode.
num_of_chunks = len(self.buffer) // chunk_size
leftover = len(self.buffer) - (num_of_chunks * chunk_size)
for i in range(0, num_of_chunks):
self.write_data(self.buffer[chunk_size*i:chunk_size*(i+1)])
if (leftover > 0):
self.write_data(self.buffer[chunk_size * num_of_chunks:])
def write_data(self, buf):
buffer = bytearray([0x40]) + buf # Co=0, D/C#=1
self.i2c.writeto(self.addr, buffer)
| mit | -4,408,012,690,217,617,000 | 1,090,186,413,120,215,200 | 28.462963 | 71 | 0.609679 | false |
mamachanko/lymph | lymph/core/connection.py | 8 | 4696 | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import gevent
import math
import os
import time
import logging
from lymph.utils import SampleWindow
from lymph.exceptions import RpcError
logger = logging.getLogger(__name__)
UNKNOWN = 'unknown'
RESPONSIVE = 'responsive'
UNRESPONSIVE = 'unresponsive'
CLOSED = 'closed'
IDLE = 'idle'
class Connection(object):
def __init__(self, server, endpoint, heartbeat_interval=1, timeout=3, idle_timeout=10, unresponsive_disconnect=30, idle_disconnect=60):
assert heartbeat_interval < timeout < idle_timeout
self.server = server
self.endpoint = endpoint
self.timeout = timeout
self.heartbeat_interval = heartbeat_interval
self.idle_timeout = idle_timeout
self.unresponsive_disconnect = unresponsive_disconnect
self.idle_disconnect = idle_disconnect
now = time.monotonic()
self.last_seen = 0
self.idle_since = 0
self.last_message = now
self.created_at = now
self.heartbeat_samples = SampleWindow(100, factor=1000) # milliseconds
self.explicit_heartbeat_count = 0
self.status = UNKNOWN
self.received_message_count = 0
self.sent_message_count = 0
self.heartbeat_loop_greenlet = self.server.spawn(self.heartbeat_loop)
self.live_check_loop_greenlet = self.server.spawn(self.live_check_loop)
self.pid = os.getpid()
def __str__(self):
return "connection to=%s last_seen=%s" % (self.endpoint, self._dt())
def _dt(self):
return time.monotonic() - self.last_seen
@property
def phi(self):
p = self.heartbeat_samples.p(self._dt())
if p == 0:
return float('inf')
return -math.log10(p)
def set_status(self, status):
self.status = status
def heartbeat_loop(self):
while True:
start = time.monotonic()
channel = self.server.ping(self.endpoint)
error = False
try:
channel.get(timeout=self.heartbeat_interval)
except RpcError as e:
logger.debug('hearbeat error on %s: %r', self, e)
error = True
took = time.monotonic() - start
if not error:
self.heartbeat_samples.add(took)
self.explicit_heartbeat_count += 1
gevent.sleep(max(0, self.heartbeat_interval - took))
def live_check_loop(self):
while True:
self.update_status()
self.log_stats()
gevent.sleep(1)
def update_status(self):
if self.last_seen:
now = time.monotonic()
if now - self.last_seen >= self.timeout:
self.set_status(UNRESPONSIVE)
elif now - self.last_message >= self.idle_timeout:
self.set_status(IDLE)
self.idle_since = now
else:
self.set_status(RESPONSIVE)
def log_stats(self):
roundtrip_stats = 'window (mean rtt={mean:.1f} ms; stddev rtt={stddev:.1f})'.format(**self.heartbeat_samples.stats)
roundtrip_total_stats = 'total (mean rtt={mean:.1f} ms; stddev rtt={stddev:.1f})'.format(**self.heartbeat_samples.total.stats)
logger.debug("pid=%s; endpoint=%s; %s; %s; phi=%.3f; ping/s=%.2f; status=%s" % (
self.pid,
self.endpoint,
roundtrip_stats,
roundtrip_total_stats,
self.phi,
self.explicit_heartbeat_count / max(1, time.monotonic() - self.created_at),
self.status,
))
def close(self):
if self.status == CLOSED:
return
self.status = CLOSED
self.heartbeat_loop_greenlet.kill()
self.live_check_loop_greenlet.kill()
self.server.disconnect(self.endpoint)
def on_recv(self, msg):
now = time.monotonic()
self.last_seen = now
if not msg.is_idle_chatter():
self.last_message = now
self.received_message_count += 1
def on_send(self, msg):
if not msg.is_idle_chatter():
self.last_message = time.monotonic()
self.sent_message_count += 1
def is_alive(self):
return self.status in (RESPONSIVE, IDLE, UNKNOWN)
def stats(self):
# FIXME: rtt and phi should be recorded as summary/histogram for all connections
return {
'endpoint': self.endpoint,
'rtt': self.heartbeat_samples.stats,
'phi': self.phi,
'status': self.status,
'sent': self.sent_message_count,
'received': self.received_message_count,
}
| apache-2.0 | -7,679,985,716,895,526,000 | 432,726,723,021,927,800 | 31.611111 | 139 | 0.585818 | false |
GoogleCloudPlatform/training-data-analyst | courses/data-engineering/kubeflow-examples/mnist/testing/conftest.py | 2 | 3215 | import os
import pytest
def pytest_addoption(parser):
parser.addoption(
"--tfjob_name", help="Name for the TFjob.",
type=str, default="mnist-test-" + os.getenv('BUILD_ID'))
parser.addoption(
"--namespace", help=("The namespace to run in. This should correspond to"
"a namespace associated with a Kubeflow namespace."),
type=str, default="kubeflow-kubeflow-testing")
parser.addoption(
"--repos", help="The repos to checkout; leave blank to use defaults",
type=str, default="")
parser.addoption(
"--trainer_image", help="TFJob training image",
type=str, default="gcr.io/kubeflow-examples/mnist/model:build-" + os.getenv('BUILD_ID'))
parser.addoption(
"--train_steps", help="train steps for mnist testing",
type=str, default="200")
parser.addoption(
"--batch_size", help="batch size for mnist trainning",
type=str, default="100")
parser.addoption(
"--learning_rate", help="mnist learnning rate",
type=str, default="0.01")
parser.addoption(
"--num_ps", help="The number of PS",
type=str, default="1")
parser.addoption(
"--num_workers", help="The number of Worker",
type=str, default="2")
parser.addoption(
"--model_dir", help="Path for model saving",
type=str, default="gs://kubeflow-ci-deployment_ci-temp/mnist/models/" + os.getenv('BUILD_ID'))
parser.addoption(
"--export_dir", help="Path for model exporting",
type=str, default="gs://kubeflow-ci-deployment_ci-temp/mnist/models/" + os.getenv('BUILD_ID'))
parser.addoption(
"--deploy_name", help="Name for the service deployment",
type=str, default="mnist-test-" + os.getenv('BUILD_ID'))
parser.addoption(
"--master", action="store", default="", help="IP address of GKE master")
parser.addoption(
"--service", action="store", default="mnist-test-" + os.getenv('BUILD_ID'),
help="The name of the mnist K8s service")
@pytest.fixture
def master(request):
return request.config.getoption("--master")
@pytest.fixture
def namespace(request):
return request.config.getoption("--namespace")
@pytest.fixture
def service(request):
return request.config.getoption("--service")
@pytest.fixture
def tfjob_name(request):
return request.config.getoption("--tfjob_name")
@pytest.fixture
def repos(request):
return request.config.getoption("--repos")
@pytest.fixture
def trainer_image(request):
return request.config.getoption("--trainer_image")
@pytest.fixture
def train_steps(request):
return request.config.getoption("--train_steps")
@pytest.fixture
def batch_size(request):
return request.config.getoption("--batch_size")
@pytest.fixture
def learning_rate(request):
return request.config.getoption("--learning_rate")
@pytest.fixture
def num_ps(request):
return request.config.getoption("--num_ps")
@pytest.fixture
def num_workers(request):
return request.config.getoption("--num_workers")
@pytest.fixture
def model_dir(request):
return request.config.getoption("--model_dir")
@pytest.fixture
def export_dir(request):
return request.config.getoption("--export_dir")
@pytest.fixture
def deploy_name(request):
return request.config.getoption("--deploy_name")
| apache-2.0 | -7,415,804,899,387,365,000 | -6,544,958,846,098,814,000 | 26.715517 | 98 | 0.692068 | false |
devdelay/home-assistant | homeassistant/util/__init__.py | 1 | 13534 | """Helper methods for various modules."""
from collections.abc import MutableSet
from itertools import chain
import threading
import queue
from datetime import datetime
import re
import enum
import socket
import random
import string
from functools import wraps
from types import MappingProxyType
from typing import Any, Sequence
from .dt import as_local, utcnow
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
def sanitize_filename(filename):
r"""Sanitize a filename by removing .. / and \\."""
return RE_SANITIZE_FILENAME.sub("", filename)
def sanitize_path(path):
"""Sanitize a path by removing ~ and .."""
return RE_SANITIZE_PATH.sub("", path)
def slugify(text: str) -> str:
"""Slugify a given text."""
text = text.lower().replace(" ", "_")
return RE_SLUGIFY.sub("", text)
def repr_helper(inp: Any) -> str:
"""Help creating a more readable string representation of objects."""
if isinstance(inp, (dict, MappingProxyType)):
return ", ".join(
repr_helper(key)+"="+repr_helper(item) for key, item
in inp.items())
elif isinstance(inp, datetime):
return as_local(inp).isoformat()
else:
return str(inp)
def convert(value, to_type, default=None):
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except (ValueError, TypeError):
# If value could not be converted
return default
def ensure_unique_string(preferred_string: str,
current_strings: Sequence[str]) -> str:
"""Return a string that is not present in current_strings.
If preferred string exists will append _2, _3, ..
"""
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = "{}_{}".format(preferred_string, tries)
return test_string
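def _ensure_unique_string_example():
    """Illustrative sketch (editor addition, not part of the original API)."""
    # 'sensor' and 'sensor_2' are taken, so '_3' is appended.
    return ensure_unique_string('sensor', ['sensor', 'sensor_2'])  # 'sensor_3'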
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip():
"""Try to determine the local IP address of the machine."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(('8.8.8.8', 80))
return sock.getsockname()[0]
except socket.error:
return socket.gethostbyname(socket.gethostname())
finally:
sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length=10):
"""Return a random string with letters and digits."""
generator = random.SystemRandom()
source_chars = string.ascii_letters + string.digits
return ''.join(generator.choice(source_chars) for _ in range(length))
class OrderedEnum(enum.Enum):
"""Taken from Python 3.4.0 docs."""
# pylint: disable=no-init, too-few-public-methods
def __ge__(self, other):
"""Return the greater than element."""
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
"""Return the greater element."""
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
"""Return the lower than element."""
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
"""Return the lower element."""
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class OrderedSet(MutableSet):
"""Ordered set taken from http://code.activestate.com/recipes/576694/."""
def __init__(self, iterable=None):
"""Initialize the set."""
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
"""Return the length of the set."""
return len(self.map)
def __contains__(self, key):
"""Check if key is in set."""
return key in self.map
def add(self, key):
"""Add an element to the end of the set."""
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def promote(self, key):
"""Promote element to beginning of the set, add if not there."""
if key in self.map:
self.discard(key)
begin = self.end[2]
curr = begin[1]
curr[2] = begin[1] = self.map[key] = [key, curr, begin]
def discard(self, key):
"""Discard an element from the set."""
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
"""Iteration of the set."""
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
"""Reverse the ordering."""
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True): # pylint: disable=arguments-differ
"""Pop element of the end of the set.
Set last=False to pop from the beginning.
"""
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def update(self, *args):
"""Add elements from args to the set."""
for item in chain(*args):
self.add(item)
def __repr__(self):
"""Return the representation."""
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
"""Return the comparision."""
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
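def _ordered_set_example():
    """Illustrative sketch (editor addition, not part of the original API):
    insertion order is preserved and promote() moves an element to the front."""
    oset = OrderedSet('abracadabra')  # iterates as: a, b, r, c, d
    oset.promote('d')                 # 'd' now iterates first
    return list(oset)                 # ['d', 'a', 'b', 'r', 'c']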
class Throttle(object):
"""A class for throttling the execution of tasks.
This method decorator adds a cooldown to a method to prevent it from being
    called more than once within the timedelta interval `min_time` after it
returned its result.
Calling a method a second time during the interval will return None.
Pass keyword argument `no_throttle=True` to the wrapped method to make
the call not throttled.
Decorator takes in an optional second timedelta interval to throttle the
'no_throttle' calls.
Adds a datetime attribute `last_call` to the method.
"""
# pylint: disable=too-few-public-methods
def __init__(self, min_time, limit_no_throttle=None):
"""Initialize the throttle."""
self.min_time = min_time
self.limit_no_throttle = limit_no_throttle
def __call__(self, method):
"""Caller for the throttle."""
if self.limit_no_throttle is not None:
method = Throttle(self.limit_no_throttle)(method)
# Different methods that can be passed in:
# - a function
# - an unbound function on a class
# - a method (bound function on a class)
# We want to be able to differentiate between function and unbound
# methods (which are considered functions).
        # All methods have the classname in their qualname separated by a '.'
# Functions have a '.' in their qualname if defined inline, but will
# be prefixed by '.<locals>.' so we strip that out.
is_func = (not hasattr(method, '__self__') and
'.' not in method.__qualname__.split('.<locals>.')[-1])
@wraps(method)
def wrapper(*args, **kwargs):
"""Wrapper that allows wrapped to be called only once per min_time.
If we cannot acquire the lock, it is running so return None.
"""
# pylint: disable=protected-access
if hasattr(method, '__self__'):
host = method.__self__
elif is_func:
host = wrapper
else:
host = args[0] if args else wrapper
if not hasattr(host, '_throttle'):
host._throttle = {}
if id(self) not in host._throttle:
host._throttle[id(self)] = [threading.Lock(), None]
throttle = host._throttle[id(self)]
if not throttle[0].acquire(False):
return None
# Check if method is never called or no_throttle is given
force = not throttle[1] or kwargs.pop('no_throttle', False)
try:
if force or utcnow() - throttle[1] > self.min_time:
result = method(*args, **kwargs)
throttle[1] = utcnow()
return result
else:
return None
finally:
throttle[0].release()
return wrapper
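# A minimal usage sketch for Throttle, assuming `datetime.timedelta` is
# available in this module; `fetch_remote_state()` is a hypothetical expensive
# call used only for illustration:
#
#   class Sensor(object):
#       @Throttle(timedelta(seconds=30))
#       def update(self):
#           return fetch_remote_state()
#
#   sensor = Sensor()
#   sensor.update()                  # executes and returns the result
#   sensor.update()                  # inside the cooldown -> returns None
#   sensor.update(no_throttle=True)  # bypasses the cooldown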
class ThreadPool(object):
"""A priority queue-based thread pool."""
# pylint: disable=too-many-instance-attributes
def __init__(self, job_handler, worker_count=0, busy_callback=None):
"""Initialize the pool.
job_handler: method to be called from worker thread to handle job
worker_count: number of threads to run that handle jobs
busy_callback: method to be called when queue gets too big.
Parameters: worker_count, list of current_jobs,
pending_jobs_count
"""
self._job_handler = job_handler
self._busy_callback = busy_callback
self.worker_count = 0
self.busy_warning_limit = 0
self._work_queue = queue.PriorityQueue()
self.current_jobs = []
self._lock = threading.RLock()
self._quit_task = object()
self.running = True
for _ in range(worker_count):
self.add_worker()
def add_worker(self):
"""Add worker to the thread pool and reset warning limit."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
worker = threading.Thread(
target=self._worker,
name='ThreadPool Worker {}'.format(self.worker_count))
worker.daemon = True
worker.start()
self.worker_count += 1
self.busy_warning_limit = self.worker_count * 3
def remove_worker(self):
"""Remove worker from the thread pool and reset warning limit."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(0, self._quit_task))
self.worker_count -= 1
self.busy_warning_limit = self.worker_count * 3
def add_job(self, priority, job):
"""Add a job to the queue."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(priority, job))
# Check if our queue is getting too big.
if self._work_queue.qsize() > self.busy_warning_limit \
and self._busy_callback is not None:
# Increase limit we will issue next warning.
self.busy_warning_limit *= 2
self._busy_callback(
self.worker_count, self.current_jobs,
self._work_queue.qsize())
def block_till_done(self):
"""Block till current work is done."""
self._work_queue.join()
def stop(self):
"""Finish all the jobs and stops all the threads."""
self.block_till_done()
with self._lock:
if not self.running:
return
# Tell the workers to quit
for _ in range(self.worker_count):
self.remove_worker()
self.running = False
# Wait till all workers have quit
self.block_till_done()
def _worker(self):
"""Handle jobs for the thread pool."""
while True:
# Get new item from work_queue
job = self._work_queue.get().item
if job is self._quit_task:
self._work_queue.task_done()
return
# Add to current running jobs
job_log = (utcnow(), job)
self.current_jobs.append(job_log)
# Do the job
self._job_handler(job)
# Remove from current running job
self.current_jobs.remove(job_log)
# Tell work_queue the task is done
self._work_queue.task_done()
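# A minimal usage sketch for ThreadPool: two workers draining prioritised jobs,
# where a lower priority number is handled first (see PriorityQueueItem below).
# `handle` is a stand-in for any callable that accepts the queued job object.
#
#   def handle(job):
#       print("processing", job)
#
#   pool = ThreadPool(handle, worker_count=2)
#   pool.add_job(10, "routine work")
#   pool.add_job(1, "urgent work")   # handled before the priority-10 job
#   pool.block_till_done()
#   pool.stop()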
class PriorityQueueItem(object):
"""Holds a priority and a value. Used within PriorityQueue."""
# pylint: disable=too-few-public-methods
def __init__(self, priority, item):
"""Initialize the queue."""
self.priority = priority
self.item = item
def __lt__(self, other):
"""Return the ordering."""
return self.priority < other.priority
| mit | 6,903,292,226,251,662,000 | 6,098,046,952,795,508,000 | 30.328704 | 79 | 0.570637 | false |
bxlab/bx-python | lib/bx/align/epo.py | 1 | 11523 | """Classes and utilities for mutliple alignments from the EPO pipeline"""
import logging
import os
import pickle as cPickle
import re
from collections import namedtuple
from ._epo import ( # noqa: F401
bed_union,
cummulative_intervals,
fastLoadChain,
rem_dash
)
log = logging.getLogger(__name__)
class Chain(namedtuple('Chain', 'score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id')):
"""A Chain header as in http://genome.ucsc.edu/goldenPath/help/chain.html
chain coordinates are with respect to the strand, so for example tStart on the + strand is the
distance from the leftmost position; tStart on the - strand is the distance from the rightmost position."""
__slots__ = ()
def __str__(self):
return "chain {score} {tName} {tSize} {tStrand} {tStart} {tEnd} {qName} {qSize} {qStrand} {qStart} {qEnd} {id}".format(**self._asdict())
@classmethod
def _strfactory(cls, line):
"""factory class method for Chain
:param line: header of a chain (in .chain format)
"""
assert isinstance(line, str), "this is a factory from string"
line = line.rstrip().split()[1:] # the first component is the keyword "chain"
tup = [t[0](t[1]) for t in zip([int, str, int, str, int, int, str, int, str, int, int, str], line)]
return tuple.__new__(cls, tup)
@classmethod
def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes):
"""crate a chain of collinear rings from the given components.
The target of the chain will always be on the forward strand.
This is done to avoid confusion when mapping psl files. So,
if trg_comp.strand=-, qr_comp.strand=- (resp. +) the
chain header will have tStrand=+, qStrand=+ (resp. -). No strand
        changes in the other cases.
:param trg_comp: target (i.e, the first) component
:type trg_comp: L{EPOitem}
:param qr_comp: query (i.e, the second) component
:type qr_comp: L{EPOitem}
:param trg_chrom_sizes: chromosome sizes of the target
:type trg_chrom_sizes: dictionary of the type (chrom) --> size
:param qr_chrom_sizes: chromosome sizes of the query
:type qr_chrom_sizes: dictionary of the type (chrom) --> size
:return: A L{Chain} instance"""
# size, target, query arrays
S, T, Q = [], [], []
# the target strand of the chain must be on the forward strand
trg_intervals = trg_comp.intervals(reverse=trg_comp.strand == '-')
qr_intervals = qr_comp.intervals(reverse=trg_comp.strand == '-')
if len(trg_intervals) == 0 or len(qr_intervals) == 0:
log.warning("deletion/insertion only intervals")
return None
A, B = rem_dash(trg_intervals, qr_intervals)
# correct for when cigar starts/ends with dashes (in number of bases)
tr_start_correction = max(B[0][0] - A[0][0], 0)
tr_end_correction = max(A[-1][1] - B[-1][1], 0)
qr_start_correction = max(A[0][0] - B[0][0], 0)
qr_end_correction = max(B[-1][1] - A[-1][1], 0)
a, b = A.pop(0), B.pop(0)
        # intervals are 0-based, half-open => lengths = coordinate difference
while A or B:
if a[1] < b[1]:
T.append(0)
Q.append(A[0][0] - a[1])
S.append(min(a[1], b[1]) - max(a[0], b[0]))
a = A.pop(0)
elif b[1] < a[1]:
Q.append(0)
T.append(B[0][0] - b[1])
S.append(min(a[1], b[1]) - max(a[0], b[0]))
b = B.pop(0)
elif A and B:
assert 1 > 2, "there are dash columns"
else:
break
S.append(min(a[1], b[1]) - max(a[0], b[0]))
assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q)))
tSize = trg_chrom_sizes[trg_comp.chrom]
qSize = qr_chrom_sizes[qr_comp.chrom]
        # UCSC coordinates are 0-based, half-open and e! coordinates are 1-based, closed
# chain_start = epo_start - 1 and chain_end = epo_end
if qr_comp.strand == '+':
chain = Chain(
0, trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction,
qr_comp.gabid)
else:
chain = Chain(
0, trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction,
qr_comp.gabid)
# strand correction. in UCSC coordinates this is: size - coord
if chain.qStrand == '-':
chain = chain._replace(
qEnd=chain.qSize - chain.qStart,
qStart=chain.qSize - chain.qEnd)
assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % (
str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T))
assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % (
str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q))
return chain, S, T, Q
def slice(self, who):
"return the slice entry (in a bed6 format), AS IS in the chain header"
assert who in ('t', 'q'), "who should be 't' or 'q'"
if who == 't':
return (self.tName, self.tStart, self.tEnd, self.id, self.score, self.tStrand)
else:
return (self.qName, self.qStart, self.qEnd, self.id, self.score, self.qStrand)
def bedInterval(self, who):
"return a BED6 entry, thus DOES coordinate conversion for minus strands"
if who == 't':
st, en = self.tStart, self.tEnd
if self.tStrand == '-':
st, en = self.tSize-en, self.tSize-st
return (self.tName, st, en, self.id, self.score, self.tStrand)
else:
st, en = self.qStart, self.qEnd
if self.qStrand == '-':
st, en = self.qSize-en, self.qSize-st
assert en-st == self.qEnd - self.qStart
return (self.qName, st, en, self.id, self.score, self.qStrand)
@classmethod
def _parse_file(cls, path, pickle=False):
"""parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...]
        :param path: name of the file"""
fname = path
if fname.endswith(".gz"):
fname = path[:-3]
if fname.endswith('.pkl'):
# you asked for the pickled file. I'll give it to you
log.debug("loading pickled file %s ...", fname)
with open(fname, "rb") as f:
return cPickle.load(f)
elif os.path.isfile("%s.pkl" % fname):
# there is a cached version I can give to you
log.info("loading pickled file %s.pkl ...", fname)
if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime:
log.critical("*** pickled file %s.pkl is not up to date ***", fname)
try:
with open("%s.pkl" % fname, "rb") as f:
return cPickle.load(f)
except Exception:
log.warning("Loading pickled file %s.pkl failed", fname)
data = fastLoadChain(path, cls._strfactory)
if pickle and not os.path.isfile('%s.pkl' % fname):
log.info("pickling to %s.pkl", fname)
with open('%s.pkl' % fname, 'wb') as f:
cPickle.dump(data, f)
return data
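# A minimal usage sketch for Chain, with a made-up header line; slice() returns
# the query interval exactly as stored, while bedInterval() converts it to
# forward-strand BED coordinates for the '-' strand query:
#
#   hdr = "chain 4900 chr1 249250621 + 10000 20000 chr5 180915260 - 500 10500 1"
#   ch = Chain._strfactory(hdr)
#   ch.slice('q')        # -> ('chr5', 500, 10500, '1', 4900, '-')
#   ch.bedInterval('q')  # -> ('chr5', 180904760, 180914760, '1', 4900, '-')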
class EPOitem(namedtuple('Epo_item', 'species gabid chrom start end strand cigar')):
"this format is how alignments are delivered from e!"
__slots__ = ()
cigar_pattern = re.compile(r"(\d*)([MD])")
def __repr__(self):
return str(self)
def __str__(self):
c = self.cigar[:5] + "..." + self.cigar[-5:]
return "(%s %s %s %d %d %s %s)" % tuple(self[:6] + (c,))
@classmethod
def _strfactory(cls, line):
"""factory method for an EPOitem
:param line: a line of input"""
cmp = line.rstrip().split()
chrom = cmp[2]
if not chrom.startswith("chr"):
chrom = "chr%s" % chrom
instance = tuple.__new__(
cls,
(cmp[0], cmp[1], chrom, int(cmp[3]), int(cmp[4]), {'1': '+', '-1': '-'}[cmp[5]], cmp[6]))
span = instance.end - instance.start + 1
m_num = sum((t[1] == "M" and [t[0]] or [0])[0] for t in instance.cigar_iter(False))
if span != m_num:
log.warning("[{gabid}] {species}.{chrom}:{start}-{end}.".format(**instance._asdict()) + "(span) %d != %d (matches)" % (span, m_num))
return None
return instance
@classmethod
def _parse_epo(cls, fname):
"""Load an entire file in the EPO format into a dictionary of the type {gab_id => [Epoitem, ...]}
:param fname: file name"""
data = {}
with open(fname) as fd:
for el in (cls._strfactory(_) for _ in fd):
if el:
data.setdefault(el.gabid, []).append(el)
log.info("parsed %d elements from %s", len(data), fname)
return data
def cigar_iter(self, reverse):
"""self.cigar => [(length, type) ... ] iterate the cigar
:param reverse: whether to iterate in the reverse direction (right-to-left)
:type reverse: boolean
:return a list of pairs of the type [(length, M/D) ..]
"""
l = 0
P = self.cigar_pattern
data = []
cigar = self.cigar
parsed_cigar = re.findall(P, cigar)
if reverse:
parsed_cigar = parsed_cigar[::-1]
for _l, t in parsed_cigar:
# 1M is encoded as M
l = (_l and int(_l) or 1) # int(_l) cannot be 0
data.append((l, t))
return data
def intervals(self, reverse, thr=0):
"""return a list of (0-based half-open) intervals representing the match regions of the cigar
for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)]
4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval)
:param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter)
:type reverse: boolean
:param thr: shift all intervals by this much
:type thr: integer
:return: list of pairs"""
d = [(thr, thr)]
dl = 0
for tup in self.cigar_iter(reverse):
if tup[1] == "D":
dl = tup[0]
else:
s = d[-1][1] + dl
d.append((s, s+tup[0]))
assert d[0] == (thr, thr)
# assert that nr. of Ms in the interval == sum of produced intervals
assert sum(t[0] for t in self.cigar_iter(False) if t[1] == "M") == sum(t[1]-t[0] for t in d)
d_sum = sum(t[1]-t[0] for t in d)
assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % (
self.start, self.end, self.end-self.start+1, d_sum)
return d[1:] # clip the (thr, thr) entry
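# A minimal usage sketch for EPOitem, re-using the cigar from the intervals()
# docstring; the EPO fields are invented, with end - start + 1 equal to the
# number of matched bases (9) so that _strfactory accepts the line:
#
#   line = "homo_sapiens Gab_001 1 10 18 1 4MD4M2DM"
#   item = EPOitem._strfactory(line)
#   item.cigar_iter(False)  # -> [(4, 'M'), (1, 'D'), (4, 'M'), (2, 'D'), (1, 'M')]
#   item.intervals(False)   # -> [(0, 4), (5, 9), (11, 12)]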
| mit | 5,563,732,313,012,776,000 | -62,687,249,902,042,696 | 38.462329 | 144 | 0.540484 | false |
xutian/virt-test | virttest/qemu_monitor_unittest.py | 14 | 11219 | import unittest
import common
from qemu_monitor import Monitor
import qemu_monitor
class MockMonitor(qemu_monitor.Monitor):
""" Dummy class inherited from qemu_monitor.HumanMonitor """
def __init__(self): # pylint: disable=W0231
pass
def __del__(self):
pass
class InfoNumaTests(unittest.TestCase):
def testZeroNodes(self):
d = "0 nodes\n"
r = Monitor.parse_info_numa(d)
self.assertEquals(r, [])
def testTwoNodes(self):
d = "2 nodes\n" + \
"node 0 cpus: 0 2 4\n" + \
"node 0 size: 12 MB\n" + \
"node 1 cpus: 1 3 5\n" + \
"node 1 size: 34 MB\n"
r = Monitor.parse_info_numa(d)
self.assertEquals(r, [(12, set([0, 2, 4])),
(34, set([1, 3, 5]))])
class InfoBlocks(unittest.TestCase):
def testParseBlocks(self):
info_1_4 = """ide0-hd0: removable=0 io-status=ok file=c.qcow2 backing_file=b.qcow2 backing_file_depth=2 ro=0 drv=qcow2 encrypted=0 bps=0 bps_rd=0 bps_wr=0 iops=0 iops_rd=0 iops_wr=0
scsi0-hd0: removable=0 io-status=ok file=a.qcow ro=1 drv=raw encrypted=0 bps=0 bps_rd=0 bps_wr=0 iops=0 iops_rd=0 iops_wr=0
scsi0-hd1: removable=0 io-status=ok file=enc.qcow2 ro=0 drv=qcow2 encrypted=1 bps=0 bps_rd=0 bps_wr=0 iops=0 iops_rd=0 iops_wr=0
ide1-cd0: removable=1 locked=0 tray-open=0 io-status=ok [not inserted]
floppy0: removable=1 locked=0 tray-open=0 [not inserted]
sd0: removable=1 locked=0 tray-open=0 [not inserted]"""
info_1_5 = """ide0-hd0: c.qcow2 (qcow2)
Backing file: b.qcow2 (chain depth: 2)
scsi0-hd0: a.qcow (raw, read-only)
scsi0-hd1: enc.qcow2 (qcow2, encrypted)
ide1-cd0: [not inserted]
Removable device: not locked, tray closed
floppy0: [not inserted]
Removable device: not locked, tray closed
sd0: [not inserted]
Removable device: not locked, tray closed"""
info_qmp = [{"io-status": "ok", "device": "ide0-hd0", "locked":
False, "removable": False, "inserted": {"iops_rd": 0,
"iops_wr": 0, "ro": False, "backing_file_depth": 2,
"drv": "qcow2", "iops": 0, "bps_wr": 0, "backing_file":
"b.qcow2", "encrypted": False, "bps": 0, "bps_rd": 0,
"file": "c.qcow2", "encryption_key_missing": False},
"type": "unknown"}, {"io-status": "ok", "device":
"scsi0-hd0", "locked": False, "removable": False,
"inserted": {"iops_rd": 0, "iops_wr": 0, "ro": True,
"backing_file_depth": 0, "drv": "raw", "iops": 0,
"bps_wr": 0, "encrypted": False, "bps": 0, "bps_rd": 0,
"file": "a.qcow", "encryption_key_missing": False},
"type": "unknown"}, {"io-status": "ok", "device":
"scsi0-hd1", "locked": False, "removable": False,
"inserted": {"iops_rd": 0, "iops_wr": 0, "ro": False,
"backing_file_depth": 0, "drv": "qcow2", "iops": 0,
"bps_wr": 0, "encrypted": True, "bps": 0, "bps_rd": 0,
"file": "enc.qcow2", "encryption_key_missing": True},
"type": "unknown"}, {"io-status": "ok", "device":
"ide1-cd0", "locked": False, "removable": True,
"tray_open": False, "type": "unknown"}, {"device":
"floppy0", "locked": False, "removable": True,
"tray_open": False, "type": "unknown"}, {"device": "sd0",
"locked": False, "removable": True, "tray_open": False,
"type": "unknown"}]
monitor = MockMonitor()
# Test "info block" version 1.4
monitor.info = lambda _what, _debug: info_1_4
out1 = monitor.info_block()
exp = {'sd0': {'tray-open': 0, 'locked': 0, 'not-inserted': 1,
'removable': 1},
'ide0-hd0': {'bps_rd': 0, 'backing_file_depth': 2,
'removable': 0, 'encrypted': 0, 'bps_wr': 0,
'io-status': 'ok', 'drv': 'qcow2', 'bps': 0,
'iops': 0, 'file': 'c.qcow2', 'iops_rd': 0,
'ro': 0, 'backing_file': 'b.qcow2', 'iops_wr': 0},
'floppy0': {'tray-open': 0, 'locked': 0, 'not-inserted': 1,
'removable': 1},
'ide1-cd0': {'tray-open': 0, 'locked': 0, 'not-inserted': 1,
'io-status': 'ok', 'removable': 1},
'scsi0-hd0': {'bps_rd': 0, 'removable': 0, 'encrypted': 0,
'bps_wr': 0, 'io-status': 'ok', 'drv': 'raw',
'bps': 0, 'iops': 0, 'file': 'a.qcow',
'iops_rd': 0, 'ro': 1, 'iops_wr': 0},
'scsi0-hd1': {'bps_rd': 0, 'removable': 0, 'encrypted': 1,
'bps_wr': 0, 'io-status': 'ok', 'drv': 'qcow2',
'bps': 0, 'iops': 0, 'file': 'enc.qcow2',
'iops_rd': 0, 'ro': 0, 'iops_wr': 0}}
assert out1 == exp, ("Info block of qemu 1.4 is parsed incorrectly\n%s"
"\n%s" % (out1, exp))
# Test "info block" version 1.5
monitor.info = lambda _what, _debug: info_1_5
out2 = monitor.info_block()
exp = {'sd0': {'not-inserted': 1, 'removable': 1},
'ide0-hd0': {'backing_file_depth': 2, 'drv': 'qcow2',
'backing_file': 'b.qcow2', 'file': 'c.qcow2'},
'floppy0': {'not-inserted': 1, 'removable': 1},
'ide1-cd0': {'not-inserted': 1, 'removable': 1},
'scsi0-hd0': {'drv': 'raw', 'ro': 1, 'file': 'a.qcow'},
'scsi0-hd1': {'encrypted': 1, 'drv': 'qcow2',
'file': 'enc.qcow2'}}
assert out2 == exp, ("Info block of qemu 1.5 is parsed incorrectly\n%s"
"\n%s" % (out2, exp))
        # verify that both representations give the same results
# (qemu-1.5 is less informative so not all params are checked)
for name, params in out2.iteritems():
assert name in out1, ("missing disk '%s' in info-1.5\n%s\n%s"
% (name, out2, out1))
for key, value in params.iteritems():
                assert out1[name].get(key, 0) == value, (
                    "value of disk %s mismatch in info-1.5 %s=%s (%s)\n%s\n%s"
                    % (name, key, value, out1[name].get(key, 0), out2, out1))
# Test "query-block" qmp version
monitor.info = lambda _what, _debug: info_qmp
out3 = monitor.info_block()
exp = {'sd0': {'type': 'unknown', 'tray_open': False,
'not-inserted': True, 'removable': True,
'locked': False},
'ide0-hd0': {'bps_rd': 0, 'backing_file_depth': 2,
'removable': False, 'type': 'unknown',
'encrypted': False, 'bps_wr': 0, 'locked': False,
'drv': 'qcow2', 'bps': 0, 'iops': 0,
'io-status': 'ok', 'file': 'c.qcow2',
'iops_rd': 0, 'encryption_key_missing': False,
'ro': False, 'backing_file': 'b.qcow2',
'iops_wr': 0},
'floppy0': {'type': 'unknown', 'tray_open': False,
'not-inserted': True, 'removable': True,
'locked': False},
'ide1-cd0': {'locked': False, 'tray_open': False,
'io-status': 'ok', 'removable': True,
'not-inserted': True, 'type': 'unknown'},
'scsi0-hd0': {'bps_rd': 0, 'backing_file_depth': 0,
'removable': False, 'encrypted': False,
'bps_wr': 0, 'locked': False, 'drv': 'raw',
'bps': 0, 'iops': 0, 'io-status': 'ok',
'file': 'a.qcow', 'iops_rd': 0,
'encryption_key_missing': False, 'ro': True,
'type': 'unknown', 'iops_wr': 0},
'scsi0-hd1': {'bps_rd': 0, 'backing_file_depth': 0,
'removable': False, 'encrypted': True,
'bps_wr': 0, 'locked': False, 'drv': 'qcow2',
'bps': 0, 'iops': 0, 'io-status': 'ok',
'file': 'enc.qcow2', 'iops_rd': 0,
'encryption_key_missing': True, 'ro': False,
'type': 'unknown', 'iops_wr': 0}}
assert out3 == exp, ("QMP query-block of qemu is parsed incorrectly\n"
"%s\n%s" % (out3, exp))
        # verify that both representations give the same results
# (qemu-1.4 is less informative so not all params are checked)
for name, params in out1.iteritems():
assert name in out3, ("missing disk '%s' in info-1.5\n%s\n%s"
% (name, out1, out3))
for key, value in params.iteritems():
                assert out3[name].get(key, 0) == value, (
                    "value of disk %s mismatch in QMP version %s=%s (%s)\n%s\n%s"
                    % (name, key, value, out3[name].get(key, 0), out1, out3))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -8,899,857,938,591,880,000 | 6,605,805,801,194,706,000 | 58.047368 | 221 | 0.394064 | false |
perkinslr/pypyjs | website/js/pypy.js-0.2.0/lib/modules/profile.py | 166 | 22782 | #! /usr/bin/env python
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright Disney Enterprises, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "help", "Profile"]
# Sample timer for use with
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
# Backwards compatibility.
def help():
print "Documentation for the profile module can be found "
print "in the Python Library Reference, section 'The Python Profiler'."
if hasattr(os, "times"):
def _get_time_times(timer=os.times):
t = timer()
return t[0] + t[1]
# Using getrusage(3) is better than clock(3) if available:
# on some systems (e.g. FreeBSD), getrusage has a higher resolution
# Furthermore, on a POSIX system, clock(3) returns microseconds, which
# wrap around after 36min.
_has_res = 0
try:
import resource
resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
def _get_time_resource(timer=resgetrusage):
t = timer()
return t[0] + t[1]
_has_res = 1
except ImportError:
pass
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frames local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
def __init__(self, timer=None, bias=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.c_func_name = ""
if bias is None:
bias = self.bias
self.bias = bias # Materialize in local dict for lookup speed.
if not timer:
if _has_res:
self.timer = resgetrusage
self.dispatcher = self.trace_dispatch
self.get_time = _get_time_resource
elif hasattr(time, 'clock'):
self.timer = self.get_time = time.clock
self.dispatcher = self.trace_dispatch_i
elif hasattr(os, 'times'):
self.timer = os.times
self.dispatcher = self.trace_dispatch
self.get_time = _get_time_times
else:
self.timer = self.get_time = time.time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
length = len(t)
except TypeError:
self.get_time = timer
self.dispatcher = self.trace_dispatch_i
else:
if length == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
# This get_time() implementation needs to be defined
# here to capture the passed-in timer in the parameter
# list (for performance). Note that we can't assume
# the timer() result contains two values in all
# cases.
def get_time_timer(timer=timer, sum=sum):
return sum(timer())
self.get_time = get_time_timer
self.t = self.get_time()
self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
timer = self.timer
t = timer()
t = t[0] + t[1] - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame,t):
t = timer()
self.t = t[0] + t[1]
else:
r = timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()
else:
self.t = timer() - t # put back unrecorded delta
# Dispatch routine for macintosh (timer returns time in ticks of
# 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
timer = self.timer
t = timer()/60.0 - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()/60.0
else:
self.t = timer()/60.0 - t # put back unrecorded delta
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
get_time = self.get_time
t = get_time() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = get_time()
else:
self.t = get_time() - t # put back unrecorded delta
# In the event handlers, the first 3 elements of self.cur are unpacked
# into vrbls w/ 3-letter names. The last two characters are meant to be
# mnemonic:
# _pt self.cur[0] "parent time" time to be charged to parent frame
# _it self.cur[1] "internal time" time spent directly in the function
# _et self.cur[2] "external time" time spent in subfunctions
def trace_dispatch_exception(self, frame, t):
rpt, rit, ret, rfn, rframe, rcur = self.cur
if (rframe is not frame) and rcur:
return self.trace_dispatch_return(rframe, t)
self.cur = rpt, rit+t, ret, rfn, rframe, rcur
return 1
def trace_dispatch_call(self, frame, t):
if self.cur and frame.f_back is not self.cur[-2]:
rpt, rit, ret, rfn, rframe, rcur = self.cur
if not isinstance(rframe, Profile.fake_frame):
assert rframe.f_back is frame.f_back, ("Bad call", rfn,
rframe, rframe.f_back,
frame, frame.f_back)
self.trace_dispatch_return(rframe, 0)
assert (self.cur is None or \
frame.f_back is self.cur[-2]), ("Bad call",
self.cur[-3])
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns + 1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_c_call (self, frame, t):
fn = ("", 0, self.c_func_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns+1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
if frame is not self.cur[-2]:
assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
self.trace_dispatch_return(self.cur[-2], 0)
# Prefix "r" means part of the Returning or exiting frame.
# Prefix "p" means part of the Previous or Parent or older frame.
rpt, rit, ret, rfn, frame, rcur = self.cur
rit = rit + t
frame_total = rit + ret
ppt, pit, pet, pfn, pframe, pcur = rcur
self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
timings = self.timings
cc, ns, tt, ct, callers = timings[rfn]
if not ns:
# This is the only occurrence of the function on the stack.
# Else this is a (directly or indirectly) recursive call, and
# its cumulative time will get updated when the topmost call to
# it returns.
ct = ct + frame_total
cc = cc + 1
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
return 1
dispatch = {
"call": trace_dispatch_call,
"exception": trace_dispatch_exception,
"return": trace_dispatch_return,
"c_call": trace_dispatch_c_call,
"c_exception": trace_dispatch_return, # the C function returned
"c_return": trace_dispatch_return,
}
# The next few functions play with self.cmd. By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
self.dispatch['call'](self, frame, 0)
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
get_time = self.get_time
t = get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
self.dispatch['return'](self, self.cur[-2], t)
t = 0
self.t = get_time() - t
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort). \
print_stats()
def dump_stats(self, file):
f = open(file, 'wb')
self.create_stats()
marshal.dump(self.stats, f)
f.close()
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func, (cc, ns, tt, ct, callers) in self.timings.iteritems():
callers = callers.copy()
nc = 0
for callcnt in callers.itervalues():
nc += callcnt
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec cmd in globals, locals
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis.
#
# Note that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
# effort. Also note that if too large a value specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
# vectors for each functions. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
# low "value added" feature.)
#**************************************************************
def calibrate(self, m, verbose=0):
if self.__class__ is not Profile:
raise TypeError("Subclasses must override .calibrate().")
saved_bias = self.bias
self.bias = 0
try:
return self._calibrate_inner(m, verbose)
finally:
self.bias = saved_bias
def _calibrate_inner(self, m, verbose):
get_time = self.get_time
# Set up a test case to be run with and without profiling. Include
# lots of calls, because we're trying to quantify stopwatch overhead.
# Do not raise any exceptions, though, because we want to know
# exactly how many profile events are generated (one call event, +
# one return event, per Python-level call).
def f1(n):
for i in range(n):
x = 1
def f(m, f1=f1):
for i in range(m):
f1(100)
f(m) # warm up the cache
# elapsed_noprofile <- time f(m) takes without profiling.
t0 = get_time()
f(m)
t1 = get_time()
elapsed_noprofile = t1 - t0
if verbose:
print "elapsed time without profiling =", elapsed_noprofile
# elapsed_profile <- time f(m) takes with profiling. The difference
# is profiling overhead, only some of which the profiler subtracts
# out on its own.
p = Profile()
t0 = get_time()
p.runctx('f(m)', globals(), locals())
t1 = get_time()
elapsed_profile = t1 - t0
if verbose:
print "elapsed time with profiling =", elapsed_profile
# reported_time <- "CPU seconds" the profiler charged to f and f1.
total_calls = 0.0
reported_time = 0.0
for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
p.timings.items():
if funcname in ("f", "f1"):
total_calls += cc
reported_time += tt
if verbose:
print "'CPU seconds' profiler reported =", reported_time
print "total # calls =", total_calls
if total_calls != m + 1:
raise ValueError("internal error: total calls = %d" % total_calls)
# reported_time - elapsed_noprofile = overhead the profiler wasn't
# able to measure. Divide by twice the number of calls (since there
# are two profiler events per call in this test) to get the hidden
# overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
if verbose:
print "mean stopwatch overhead per profile event =", mean
return mean
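    # A minimal calibration sketch following the comment block above: run
    # calibrate() a few times on an otherwise idle machine, then install the
    # smallest stable figure as the class-wide bias before profiling for real
    # (the numeric value below is only a placeholder, not a measured constant):
    #
    #   pr = Profile()
    #   for i in range(5):
    #       print pr.calibrate(10000)
    #   Profile.bias = 4.26e-6   # use the number measured on your own machine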
#****************************************************************************
def Stats(*args):
print 'Report generating functions are in the "pstats" module\a'
def main():
usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
| mit | 5,001,242,432,097,537,000 | 1,271,687,095,970,572,000 | 36.347541 | 80 | 0.588447 | false |
Arcensoth/cogbot | cogbot/cogs/join_leave/join_leave_server_state.py | 1 | 2346 | from discord import Member, Role
from discord.ext.commands import Context
from cogbot.cogs.abc.base_cog import BaseCogServerState
from cogbot.cogs.join_leave.join_leave_options import JoinLeaveOptions
class JoinLeaveServerState(BaseCogServerState[JoinLeaveOptions]):
async def create_options(self) -> JoinLeaveOptions:
return await JoinLeaveOptions().init(self, self.raw_options)
async def join_role(self, ctx: Context, author: Member, role_alias: str):
try:
role_entry = self.options.role_entry_from_alias[role_alias.lower()]
role = self.bot.get_role(self.server, role_entry.role_id)
await self.bot.add_roles(author, role)
await self.bot.say(f"{author.mention} has joined {role}")
except:
self.log.info(f"{author} failed to join the role: {role_alias}")
await self.bot.react_question(ctx)
async def leave_role(self, ctx: Context, author: Member, role_alias: str):
try:
role_entry = self.options.role_entry_from_alias[role_alias]
role = self.bot.get_role(self.server, role_entry.role_id)
await self.bot.remove_roles(author, role)
await self.bot.say(f"{author.mention} has left {role}")
except:
self.log.info(f"{author} failed to leave the role: {role_alias}")
await self.bot.react_question(ctx)
async def list_roles(self, ctx: Context, author: Member):
role_lines = []
for role_entry in self.options.role_entries:
role: Role = self.bot.get_role(self.server, role_entry.role_id)
role_lines.append(f"{role}")
role_aliases = role_entry.aliases
first_role_alias = role_aliases[0]
other_role_aliases = role_aliases[1:]
role_aliases_line = f" >join {first_role_alias}"
if other_role_aliases:
other_role_aliases_str = " or ".join(
f'"{role_alias}"' for role_alias in other_role_aliases
)
role_aliases_line = f"{role_aliases_line} (or {other_role_aliases_str})"
role_lines.append(role_aliases_line)
roles_str = "\n".join(role_lines)
await self.bot.say(
f"{author.mention} Available self-assignable roles:\n```\n{roles_str}\n```"
)
| mit | 2,710,367,110,225,600,500 | 4,680,485,064,762,498,000 | 45.92 | 88 | 0.6185 | false |
Rahulsharma0810/Scrapy-Python-TheHinduDailyNews | TheHinduDailyNews/settings.py | 1 | 3227 | # -*- coding: utf-8 -*-
# Scrapy settings for TheHinduDailyNews project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'TheHinduDailyNews'
SPIDER_MODULES = ['TheHinduDailyNews.spiders']
NEWSPIDER_MODULE = 'TheHinduDailyNews.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'TheHinduDailyNews (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'TheHinduDailyNews.middlewares.ThehindudailynewsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'TheHinduDailyNews.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'TheHinduDailyNews.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit | -5,331,688,487,098,168,000 | 4,239,735,465,186,620,400 | 34.855556 | 109 | 0.771615 | false |
glennrub/micropython | tests/extmod/vfs_lfs.py | 10 | 3777 | # Test for VfsLittle using a RAM device
try:
import uos
uos.VfsLfs1
uos.VfsLfs2
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class RAMBlockDevice:
ERASE_BLOCK_SIZE = 1024
def __init__(self, blocks):
self.data = bytearray(blocks * self.ERASE_BLOCK_SIZE)
def readblocks(self, block, buf, off):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
buf[i] = self.data[addr + i]
def writeblocks(self, block, buf, off):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
self.data[addr + i] = buf[i]
def ioctl(self, op, arg):
if op == 4: # block count
return len(self.data) // self.ERASE_BLOCK_SIZE
if op == 5: # block size
return self.ERASE_BLOCK_SIZE
if op == 6: # erase block
return 0
def print_stat(st, print_size=True):
# don't print times (just check that they have the correct type)
print(st[:6], st[6] if print_size else -1, type(st[7]), type(st[8]), type(st[9]))
def test(bdev, vfs_class):
print("test", vfs_class)
# mkfs
vfs_class.mkfs(bdev)
# construction
vfs = vfs_class(bdev)
# statvfs
print(vfs.statvfs("/"))
# open, write close
f = vfs.open("test", "w")
f.write("littlefs")
f.close()
# statvfs after creating a file
print(vfs.statvfs("/"))
# ilistdir
print(list(vfs.ilistdir()))
print(list(vfs.ilistdir("/")))
print(list(vfs.ilistdir(b"/")))
# mkdir, rmdir
vfs.mkdir("testdir")
print(list(vfs.ilistdir()))
print(sorted(list(vfs.ilistdir("testdir"))))
vfs.rmdir("testdir")
print(list(vfs.ilistdir()))
vfs.mkdir("testdir")
# stat a file
print_stat(vfs.stat("test"))
# stat a dir (size seems to vary on LFS2 so don't print that)
print_stat(vfs.stat("testdir"), False)
# read
with vfs.open("test", "r") as f:
print(f.read())
# create large file
with vfs.open("testbig", "w") as f:
data = "large012" * 32 * 16
print("data length:", len(data))
for i in range(4):
print("write", i)
f.write(data)
# stat after creating large file
print(vfs.statvfs("/"))
# rename
vfs.rename("testbig", "testbig2")
print(sorted(list(vfs.ilistdir())))
vfs.chdir("testdir")
vfs.rename("/testbig2", "testbig2")
print(sorted(list(vfs.ilistdir())))
vfs.rename("testbig2", "/testbig2")
vfs.chdir("/")
print(sorted(list(vfs.ilistdir())))
# remove
vfs.remove("testbig2")
print(sorted(list(vfs.ilistdir())))
# getcwd, chdir
vfs.mkdir("/testdir2")
vfs.mkdir("/testdir/subdir")
print(vfs.getcwd())
vfs.chdir("/testdir")
print(vfs.getcwd())
# create file in directory to make sure paths are relative
vfs.open("test2", "w").close()
print_stat(vfs.stat("test2"))
print_stat(vfs.stat("/testdir/test2"))
vfs.remove("test2")
# chdir back to root and remove testdir
vfs.chdir("/")
print(vfs.getcwd())
vfs.chdir("testdir")
print(vfs.getcwd())
vfs.chdir("..")
print(vfs.getcwd())
vfs.chdir("testdir/subdir")
print(vfs.getcwd())
vfs.chdir("../..")
print(vfs.getcwd())
vfs.chdir("/./testdir2")
print(vfs.getcwd())
vfs.chdir("../testdir")
print(vfs.getcwd())
vfs.chdir("../..")
print(vfs.getcwd())
vfs.chdir(".//testdir")
print(vfs.getcwd())
vfs.chdir("subdir/./")
print(vfs.getcwd())
vfs.chdir("/")
print(vfs.getcwd())
vfs.rmdir("testdir/subdir")
vfs.rmdir("testdir")
vfs.rmdir("testdir2")
bdev = RAMBlockDevice(30)
test(bdev, uos.VfsLfs1)
test(bdev, uos.VfsLfs2)
| mit | 8,405,463,046,186,722,000 | 5,053,751,641,921,150,000 | 23.525974 | 85 | 0.582208 | false |
mypinballs/whirlwind | effects.py | 1 | 8263 | # Top Rollover Lanes
__author__="jim"
__date__ ="$Jan 18, 2011 1:36:37 PM$"
import procgame
import locale
from procgame import *
base_path = config.value_for_key_path('base_path')
game_path = base_path+"games/whirlwind/"
class Effects(game.Mode):
def __init__(self, game, priority):
super(Effects, self).__init__(game, priority)
def drive_lamp(self, lamp_name, style='on',time=2):
if style == 'slow':
self.game.lamps[lamp_name].schedule(schedule=0x00ff00ff, cycle_seconds=0, now=True)
elif style == 'medium':
self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True)
elif style == 'fast':
self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True)
elif style == 'superfast':
self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True)
elif style == 'on':
self.game.lamps[lamp_name].enable()
elif style == 'off':
self.off(lamp_name)
elif style == 'smarton':
self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True)
self.cancel_delayed(lamp_name+'_on')
self.delay(name=lamp_name+'_on', event_type=None, delay=0.6, handler=self.game.lamps[lamp_name].enable)
elif style == 'timedon':
self.game.lamps[lamp_name].enable()
self.cancel_delayed(lamp_name+'_off')
self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name)
elif style == 'timeout':
if time>10:
self.cancel_delayed(lamp_name+'_medium')
self.delay(name=lamp_name+'_medium', event_type=None, delay=time-10, handler=lambda:self.drive_lamp(lamp_name,'medium'))
if time>5:
self.cancel_delayed(lamp_name+'_fast')
self.delay(name=lamp_name+'_fast', event_type=None, delay=time-5, handler=lambda:self.drive_lamp(lamp_name,'fast'))
if time>1:
self.cancel_delayed(lamp_name+'_superfast')
self.delay(name=lamp_name+'_superfast', event_type=None, delay=time-1, handler=lambda:self.drive_lamp(lamp_name,'superfast'))
self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name)
def clear_lamp_timers(self,lamp_name):
self.cancel_delayed(lamp_name+'_medium')
self.cancel_delayed(lamp_name+'_fast')
self.cancel_delayed(lamp_name+'_superfast')
self.cancel_delayed(lamp_name+'on')
self.cancel_delayed(lamp_name+'_off')
def off(self,lamp_name):
self.clear_lamp_timers(lamp_name)
self.game.lamps[lamp_name].disable()
# def drive_super_fast(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True)
#
# def drive_fast(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x55555555, cycle_seconds=0, now=True)
#
# def drive_medium(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True)
def drive_flasher(self, data, style='medium',cycle=0,time=2):
if isinstance(data, basestring):
flasher_name=data
else:
flasher_name=data[0]
style = data[1]
time = data[2]
if style == 'slow':
self.game.coils[flasher_name].schedule(schedule=0x00003000, cycle_seconds=cycle, now=True)
elif style == 'medium':
self.game.coils[flasher_name].schedule(schedule=0x30003000, cycle_seconds=cycle, now=True)
elif style == 'fast':
self.game.coils[flasher_name].schedule(schedule=0x11111111, cycle_seconds=cycle, now=True)
elif style == 'super':
self.game.coils[flasher_name].schedule(schedule=0x55555555, cycle_seconds=cycle, now=True)
elif style == 'super2':
self.game.coils[flasher_name].schedule(schedule=0x55055055, cycle_seconds=cycle, now=True)
elif style == 'strobe':
self.game.coils[flasher_name].schedule(schedule=0xeeeeeeee, cycle_seconds=cycle, now=True)
elif style == 'chaos':
self.game.coils[flasher_name].schedule(schedule=0x019930AB, cycle_seconds=cycle, now=True)
elif style == 'fade':
self.game.coils[flasher_name].schedule(schedule=0xAAA99933, cycle_seconds=cycle, now=True)
if time>0:
self.delay(name=flasher_name+'_off', event_type=None, delay=time, handler=self.game.coils[flasher_name].disable)
# def strobe_flasher_set(self,flasher_list,time=0.5):
# timer = 0
# for fname in flasher_list:
# self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time])
# timer+=time
def strobe_flasher_set(self,flasher_list,time=1,overlap=0.2,repeats=1,enable=True):
timer = 0
for i in range(repeats):
for fname in flasher_list:
if enable:
self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time+overlap])
timer+=time
else:
self.cancel_delayed(fname+'strobe')
self.game.coils[fname].disable()
def strobe_controlled_flasher_set(self,flasher_list,time=0.1,overlap=0.2,repeats=1,enable=True):
timer = 0
#playfield flashers
sequence=[]
for j in range(repeats):
sequence += flasher_list
for i in range(len(sequence)):
def flash(i,time,delay):
self.delay(delay=delay,handler=lambda:self.game.switched_coils.drive(name=sequence[i],style='fast',time=time+0.1))
flash(i,time,timer)
timer+=time
def drive_led(self,lamp_name,colour):
if colour=='red':
self.led_colour_data(lamp_name,'on','off','off')
elif colour=='pink':
self.led_colour_data(lamp_name,'on','off','med')
elif colour=='magenta':
self.led_colour_data(lamp_name,'on','off','on')
elif colour=='purple':
self.led_colour_data(lamp_name,'med','off','on')
elif colour=='skyblue':
self.led_colour_data(lamp_name,'off','med','on')
elif colour=='blue':
self.led_colour_data(lamp_name,'off','off','on')
elif colour=='cyan':
self.led_colour_data(lamp_name,'off','on','on')
elif colour=='turquoise':
self.led_colour_data(lamp_name,'off','on','med')
elif colour=='green':
self.led_colour_data(lamp_name,'off','on','off')
elif colour=='limegreen':
self.led_colour_data(lamp_name,'med','on','off')
elif colour=='yellow':
self.led_colour_data(lamp_name,'on','on','off')
elif colour=='orange':
self.led_colour_data(lamp_name,'on','med','off')
elif colour=='white':
self.led_colour_data(lamp_name,'on','on','on')
elif colour=='black':
self.led_colour_data(lamp_name,'off','off','off')
def led_colour_data(self,lamp_name,red,blue,green):
data=[red,green,blue]
name=['Red','Green','Blue']
for i in range(len(data)):
if data[i]=='off':
self.game.lamps[lamp_name+name[i]].disable()
elif data[i]=='on':
self.game.lamps[lamp_name+name[i]].enable()
elif data[i]=='med':
self.game.lamps[lamp_name+name[i]].schedule(schedule=0x80808080, cycle_seconds=0, now=True)
# self.game.lamps[lamp_name+name[i]].patter()
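# A minimal usage sketch for Effects; `game` is the running pyprocgame game
# object and the lamp/flasher names are placeholders from a hypothetical
# machine configuration, so adjust them to the real config:
#
#   effects = Effects(game, priority=5)
#   effects.drive_lamp('shootAgain', 'fast')            # 0x99999999 schedule
#   effects.drive_lamp('jackpot', 'timedon', time=4)    # on, then off after 4s
#   effects.drive_flasher('flasherRamp', 'strobe', time=2)
#   effects.drive_led('rampArrow', 'purple')             # needs Red/Green/Blue lamps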
| gpl-3.0 | 5,570,874,081,880,066,000 | 6,633,811,232,838,052,000 | 44.15847 | 148 | 0.563839 | false |
aayush2911/Fibonaccio | web2py/gluon/contrib/memcache/__init__.py | 40 | 3734 | from gluon.contrib.memcache.memcache import Client
from gluon.cache import CacheAbstract
import time
"""
example of usage:
cache.memcache = MemcacheClient(request, ['127.0.0.1:11211'], debug=True)
"""
import cPickle as pickle
import thread
from gluon import current
DEFAULT_TIME_EXPIRE = 300 # seconds (must be the same as cache.ram)
def MemcacheClient(*a, **b):
if not hasattr(current,'__memcache_client'):
current.__memcache_client = MemcacheClientObj(*a, **b)
return current.__memcache_client
class MemcacheClientObj(Client):
meta_storage = {}
max_time_expire = 24*3600
def __init__(self, request, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
default_time_expire = DEFAULT_TIME_EXPIRE):
self.request=request
self.default_time_expire = default_time_expire
if request:
app = request.application
else:
app = ''
Client.__init__(self, servers, debug, pickleProtocol,
pickler, unpickler, pload, pid)
if not app in self.meta_storage:
self.storage = self.meta_storage[app] = {
CacheAbstract.cache_stats_name: {
'hit_total': 0,
'misses': 0,
}}
else:
self.storage = self.meta_storage[app]
def __call__(self, key, f, time_expire = 'default'):
if time_expire == 'default':
time_expire = self.default_time_expire
if time_expire == None:
time_expire = self.max_time_expire
# this must be commented because get and set are redefined
# key = self.__keyFormat__(key)
now = time.time()
value = None
if f is None: # force deletion of value
self.delete(key)
return None
elif time_expire==0: # value forced expired
item = None # value to be computed
else:
item = self.get(key)
if item:
if not isinstance(item,(list,tuple)):
value = item
elif (item[0] < now - time_expire): # value expired
item = None # value to be computed
else:
value = item[1]
if not item:
value = f()
self.set(key, (now,value), self.max_time_expire)
return value
def increment(self, key, value=1, time_expire='default'):
""" time_expire is ignored """
if time_expire == 'default':
time_expire = self.default_time_expire
newKey = self.__keyFormat__(key)
obj = Client.get(self, newKey)
if obj:
if isinstance(obj,(int,float,long)):
return Client.incr(self, newKey, value)
else:
value += obj[1]
Client.set(self,newKey,(time.time(),value),
self.max_time_expire)
return value
else:
Client.set(self, newKey, value, self.max_time_expire)
return value
def set(self, key, value, time_expire='default'):
if time_expire == 'default':
time_expire = self.default_time_expire
newKey = self.__keyFormat__(key)
return Client.set(self, newKey, value, time_expire)
def get(self, key):
newKey = self.__keyFormat__(key)
return Client.get(self, newKey)
def delete(self, key):
newKey = self.__keyFormat__(key)
return Client.delete(self, newKey)
def __keyFormat__(self, key):
return '%s/%s' % (self.request.application, key.replace(' ', '_'))
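# Illustrative usage sketch (not part of the original module). It assumes a
# memcached server at 127.0.0.1:11211 and a web2py `request` object; the
# helper is defined here only for documentation and is never called.
def _example_memcache_usage(request):
    client = MemcacheClient(request, ['127.0.0.1:11211'], debug=1)
    # The callable is evaluated only on a cache miss; the cached value is
    # treated as fresh for time_expire seconds (see __call__ above).
    return client('expensive/key', lambda: 40 + 2, time_expire=60)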
| gpl-2.0 | -4,524,009,200,215,444,000 | 8,434,786,697,373,565,000 | 32.63964 | 74 | 0.551152 | false |
TheTypoMaster/my-vim-set-mac | .vim/bundle/YouCompleteMe/third_party/ycmd/third_party/bottle/test/test_importhook.py | 50 | 1358 | # -*- coding: utf-8 -*-
import unittest
import sys, os
import imp
class TestImportHooks(unittest.TestCase):
def make_module(self, name, **args):
mod = sys.modules.setdefault(name, imp.new_module(name))
mod.__file__ = '<virtual %s>' % name
mod.__dict__.update(**args)
return mod
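    # Illustrative sketch (assumption about bottle's import hook): a module
    # registered as 'bottle_test' above becomes importable as 'bottle.ext.test',
    # e.g.
    #     mod = self.make_module('bottle_test', answer=42)
    #     from bottle.ext.test import answer   # -> 42
    # which is exactly what the tests below exercise.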
def test_direkt_import(self):
mod = self.make_module('bottle_test')
import bottle.ext.test
self.assertEqual(bottle.ext.test, mod)
def test_from_import(self):
mod = self.make_module('bottle_test')
from bottle.ext import test
self.assertEqual(test, mod)
def test_data_import(self):
mod = self.make_module('bottle_test', item='value')
from bottle.ext.test import item
self.assertEqual(item, 'value')
def test_import_fail(self):
        ''' Importing a module that does not exist under bottle.ext raises ImportError. '''
def test():
import bottle.ext.doesnotexist
self.assertRaises(ImportError, test)
def test_ext_isfile(self):
''' The virtual module needs a valid __file__ attribute.
If not, the Google app engine development server crashes on windows.
'''
from bottle import ext
self.assertTrue(os.path.isfile(ext.__file__))
if __name__ == '__main__': #pragma: no cover
unittest.main()
| gpl-2.0 | 7,777,472,773,247,802,000 | -9,101,183,282,476,660,000 | 30.581395 | 80 | 0.607511 | false |
Cisco-Talos/pyrebox | volatility/setup.py | 12 | 3606 | #!/usr/bin/env python
# Volatility
#
# Authors:
# AAron Walters <[email protected]>
# Mike Auty <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import volatility.constants
import sys
import os
py2exe_available = True
try:
import py2exe #pylint: disable-msg=W0611,F0401
except ImportError:
py2exe_available = False
def find_files(topdirs, py = False):
"""Lists all python files under any topdir from the topdirs lists.
Returns an appropriate list for data_files,
with source and destination directories the same"""
ret = []
for topdir in topdirs:
for r, _ds, fs in os.walk(topdir):
ret.append((r, [ os.path.join(r, f) for f in fs if (f.endswith('.py') or not py)]))
return ret
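# Illustrative sketch of the returned structure (hypothetical paths, not
# verified against a real checkout):
#     find_files(['contrib'], py=True)
#       -> [('contrib', ['contrib/foo.py']),
#           ('contrib/plugins', ['contrib/plugins/bar.py'])]
# i.e. one (directory, [files]) pair per directory walked, which is the shape
# distutils expects for the data_files option below.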
opts = {}
opts['name'] = "volatility"
opts['version'] = volatility.constants.VERSION
opts['description'] = "Volatility -- Volatile memory framework"
opts['author'] = "AAron Walters"
opts['author_email'] = "[email protected]"
opts['url'] = "http://www.volatilityfoundation.org"
opts['license'] = "GPL"
opts['scripts'] = ["vol.py"]
opts['packages'] = ["volatility",
"volatility.win32",
"volatility.renderers",
"volatility.plugins",
"volatility.plugins.addrspaces",
"volatility.plugins.overlays",
"volatility.plugins.overlays.windows",
"volatility.plugins.overlays.linux",
"volatility.plugins.overlays.mac",
"volatility.plugins.gui",
"volatility.plugins.gui.vtypes",
"volatility.plugins.linux",
"volatility.plugins.registry",
"volatility.plugins.malware",
"volatility.plugins.mac"]
opts['data_files'] = find_files(['contrib'], py = True) + find_files(['tools'])
if py2exe_available:
py2exe_distdir = 'dist/py2exe'
opts['console'] = [{ 'script': 'vol.py',
'icon_resources': [(1, 'resources/volatility.ico')]
}]
# Optimize must be 1 for plugins that use docstring for the help value,
# otherwise the help gets optimized out
opts['options'] = {'py2exe':{'optimize': 1,
'dist_dir': py2exe_distdir,
'packages': opts['packages'] + ['socket', 'ctypes', 'Crypto.Cipher', 'urllib', 'distorm3', 'yara', 'xml.etree.ElementTree'],
# This, along with zipfile = None, ensures a single binary
'bundle_files': 1,
}
}
opts['zipfile'] = None
distrib = setup(**opts) #pylint: disable-msg=W0142
if 'py2exe' in sys.argv:
# Any py2exe specific files or things that need doing can go in here
pass
| gpl-2.0 | -647,238,307,956,128,100 | -1,598,956,959,320,410,000 | 36.175258 | 157 | 0.606489 | false |
lilmuck/lilmuck | plugin.video.szenestreams/default.py | 1 | 6874 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib,urllib2,re,xbmcaddon,xbmcplugin,xbmcgui,xbmc,HTMLParser
from stream import *
htmlparser = HTMLParser.HTMLParser()
pluginhandle = int(sys.argv[1])
itemcnt = 0
baseurl = 'http://www.szene-streams.com'
settings = xbmcaddon.Addon(id='plugin.video.szene-streams')
maxitems = (int(settings.getSetting("items_per_page"))+1)*10
filterUnknownHoster = settings.getSetting("filterUnknownHoster") == 'true'
forceMovieViewMode = settings.getSetting("forceMovieViewMode") == 'true'
movieViewMode = str(settings.getSetting("movieViewMode"))
dbg = False
def CATEGORIES():
data = getUrl(baseurl)
cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I)
addDir('Letzte Updates', baseurl, 1, '', True)
addDir('Serien', baseurl + '/load', 0, '', True)
for (url, num, name) in cats:
if 'http:' not in url: url = baseurl + url
addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True)
xbmc.executebuiltin("Container.SetViewMode(400)")
def SERIES(url):
data = getUrl(url)
cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I)
addDir('Letzte Updates', baseurl + '/load/0-1', 1, '', True)
for (url, num, name) in cats:
if 'http:' not in url: url = baseurl + url
addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True)
xbmc.executebuiltin("Container.SetViewMode(400)")
def INDEX(url):
global itemcnt
nextPageUrl = re.sub('-[\d]+$', '', url)
print url
data = getUrl(url)
movies = re.findall('<div class="ImgWrapNews">[^<]*<a[^<]*<img[^>]*src="([^"]*.[jpg|png])"[^>]*alt="([^"]*)"[^>]*>.*?class="[^"]*entryLink[^"]*".*?href="([^"]*)"', data, re.S|re.I)
if movies:
for (image, title, url) in movies:
if 'http:' not in url: url = baseurl + url
addDir(clean(title), url, 2, image, True)
itemcnt = itemcnt + 1
	nextPage = re.findall('<a class="swchItem"[^>]*onclick="spages\(\'(\d+)\'[^>]*?"[^>]*><span>»</span>', data, re.S)
if nextPage:
if itemcnt >= maxitems:
addDir('Weiter >>', nextPageUrl + '-' + nextPage[0], 1, '', True)
else:
INDEX(nextPageUrl + '-' + nextPage[0])
if forceMovieViewMode: xbmc.executebuiltin("Container.SetViewMode(" + movieViewMode + ")")
def VIDEOLINKS(url, image):
data = getUrl(url)
streams = []
raw = re.findall('(<fieldset[^>]*>[^<]*<legend>.*?</fieldset>)', data, re.S)
if raw:
for each in raw:
series = re.findall('<div class="spoiler"><font[^>]*><b[^>]*>(.+?)</b>(.*?)<input', each, re.S|re.I)
if not series: series = re.findall('<legend>(.+?)</legend>[^<]*<div class="spoiler">(.*?)<input', each, re.S|re.I)
if not series: series = re.findall('<legend>(.+?)</legend>.*?(<iframe.*?</iframe>|<a[^>]*href=".+"[^>]*>).*', each, re.S|re.I)
if series:
for ser in series:
for (s, n) in re.findall('<a[^>]*href="([^"]+)"[^>]*>([^<]*)<', each, re.S|re.I):
if dbg: print 'ser1'
if ser: n = clean(ser[1]) + ' ' + extractFilename(s)
n = clean(n) if n else extractFilename(s)
if n: streams += [(n, s)]
for s in re.findall('<iframe[^>]*src="([^"]*)"[^>]*>', each, re.S|re.I):
if dbg: print 'ser2'
if ser: n = clean(ser[1])
if not n: n = 'unknown'
if n: streams += [(n, s)]
elif re.match('.*?iframe.*?src.*', each, re.S|re.I):
if dbg: print 'nonser1'
streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?<iframe.*?src=["|\'](.*?)["|\']', each, re.S|re.I)
else:
if dbg: print 'nonser2'
streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?</font>.*?target="_blank" href=["|\'](.*?)["|\']', each, re.S|re.I)
if streams:
for (filename, stream) in streams:
hoster = get_stream_link().get_hostername(stream)
if filterUnknownHoster and hoster == 'Not Supported': continue
entry = '[COLOR=blue](' + hoster + ')[/COLOR] ' + filename
addLink(entry, clean(stream), 3, image)
def clean(s):
try: s = htmlparser.unescape(s)
except: print "could not unescape string '%s'"%(s)
s = re.sub('<[^>]*>', '', s)
s = s.replace('_', ' ')
s = re.sub('[ ]+', ' ', s)
for hit in set(re.findall("&#\d+;", s)):
try: s = s.replace(hit, unichr(int(hit[2:-1])))
except ValueError: pass
return s.strip('\n').strip()
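# Illustrative sketch of what clean() does to a typical scraped title:
#     clean('<b>Some_Movie  (2012)</b>')  ->  'Some Movie (2012)'
# (tags stripped, underscores to spaces, repeated spaces collapsed, HTML
# entities decoded).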
def extractFilename(path):
path = re.sub('^.*/', '',clean(path)).replace('.html', '').replace('_', ' ')
return re.sub('\.[a-zA-Z]{3}', '', path)
def GETLINK(url):
stream_url = get_stream_link().get_stream(url)
if stream_url:
if re.match('^Error: ', stream_url, re.S|re.I):
xbmc.executebuiltin("XBMC.Notification(Fehler!, " + re.sub('^Error: ','',stream_url) + ", 4000)")
else:
listitem = xbmcgui.ListItem(path=stream_url)
return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
def getUrl(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
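# Illustrative sketch (hypothetical plugin invocation): if Kodi starts the
# plugin with sys.argv[2] == '?url=http%3A%2F%2Fexample.org%2Fx&mode=2&image=',
# get_params() returns {'url': 'http%3A%2F%2Fexample.org%2Fx', 'mode': '2',
# 'image': ''}; the values are url-unquoted further below before use.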
def addLink(name, url, mode, image):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=image)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
def addDir(name, url, mode, image, is_folder=False):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&image="+urllib.quote_plus(image)
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=is_folder)
params = get_params()
url = mode = image = None
try: url = urllib.unquote_plus(params["url"])
except: pass
try: mode = int(params["mode"])
except: pass
try: image = urllib.unquote_plus(params["image"])
except: pass
if mode==None or url==None or len(url)<1: CATEGORIES()
elif mode==0: SERIES(url)
elif mode==1: INDEX(url)
elif mode==2: VIDEOLINKS(url, image)
elif mode==3: GETLINK(url)
xbmcplugin.endOfDirectory(int(sys.argv[1])) | gpl-2.0 | 4,237,270,708,953,784,000 | 1,593,857,588,465,617,000 | 40.167665 | 181 | 0.608816 | false |
tseaver/gcloud-python | videointelligence/nox.py | 1 | 2462 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
@nox.session
def default(session):
return unit(session, 'default')
@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
if py != 'default':
session.interpreter = 'python{}'.format(py)
# Set the virtualenv directory name.
session.virtualenv_dirname = 'unit-' + py
# Install all test dependencies, then install this package in-place.
session.install('pytest')
session.install('-e', '.')
# Run py.test against the unit tests.
session.run('py.test', '--quiet', os.path.join('tests', 'unit'))
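# Sketch of what the decorators above expand to (assumed from the legacy nox
# API used here): one session per listed Python version, each with its own
# virtualenv named 'unit-<version>', installing the package in-place and
# running `py.test tests/unit` inside it.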
# TODO: Fix generated system tests
#@nox.session
#@nox.parametrize('py', ['2.7', '3.7'])
#def system(session, py):
# """Run the system test suite."""
#
# # Sanity check: Only run system tests if the environment variable is set.
# if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
# session.skip('Credentials must be set via environment variable.')
#
# # Run unit tests against all supported versions of Python.
# session.interpreter = 'python{}'.format(py)
#
# # Set the virtualenv dirname.
# session.virtualenv_dirname = 'sys-' + py
#
# # Install all test dependencies, then install this package in-place.
# session.install('pytest')
# session.install('-e', '.')
#
# # Run py.test against the unit tests.
# session.run('py.test', '--quiet', os.path.join('tests', 'system'),
# *session.posargs)
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
| apache-2.0 | 9,098,995,854,922,876,000 | 1,060,864,091,102,827,000 | 30.974026 | 78 | 0.669374 | false |
andrewcmyers/tensorflow | tensorflow/python/kernel_tests/metrics_test.py | 28 | 139808 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
NAN = float('nan')
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_2d_sparse_value(labels):
"""Convert dense 2D binary indicator to sparse ID.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes].
Returns:
    `SparseTensorValue` of dense shape [batch_size, num_classes]. Values are
    the indices of `1` values along the last dimension of `labels`; the second
    index coordinate simply enumerates the `1` values within each row.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
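# Worked example (illustrative only, not used by the tests below):
#   _binary_2d_label_to_2d_sparse_value([[0, 1, 0], [1, 0, 1]])
# yields indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2] and shape [2, 3]:
# each value is the column of a `1`, while the second index coordinate just
# counts the `1`s seen so far in that row.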
def _binary_2d_label_to_1d_sparse_value(labels):
"""Convert dense 2D binary indicator to sparse ID.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes]. Each
row must contain exactly 1 `1` value.
Returns:
`SparseTensorValue` of shape [batch_size]. Values are indices of `1` values
along the last dimension of `labels`.
Raises:
ValueError: if there is not exactly 1 `1` value per row of `labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
if indices != [[i] for i in range(len(labels))]:
raise ValueError('Expected 1 label/example, got %s.' % indices)
shape = [len(labels)]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_local_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape)), newshape=shape)
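# For example, _test_values((2, 3)) returns [[1., 2., 3.], [4., 5., 6.]]: a
# running count reshaped to the requested shape, giving deterministic inputs
# for the weighted-mean tests below.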
class MeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUnweighted(self):
values = _test_values((3, 2, 4, 1))
mean_results = (
metrics.mean(values),
metrics.mean(values, weights=1.0),
metrics.mean(values, weights=np.ones((1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 4))),
metrics.mean(values, weights=np.ones((1, 1, 4, 1))),
metrics.mean(values, weights=np.ones((1, 2, 1))),
metrics.mean(values, weights=np.ones((1, 2, 1, 1))),
metrics.mean(values, weights=np.ones((1, 2, 4))),
metrics.mean(values, weights=np.ones((1, 2, 4, 1))),
metrics.mean(values, weights=np.ones((3, 1, 1))),
metrics.mean(values, weights=np.ones((3, 1, 1, 1))),
metrics.mean(values, weights=np.ones((3, 1, 4))),
metrics.mean(values, weights=np.ones((3, 1, 4, 1))),
metrics.mean(values, weights=np.ones((3, 2, 1))),
metrics.mean(values, weights=np.ones((3, 2, 1, 1))),
metrics.mean(values, weights=np.ones((3, 2, 4))),
metrics.mean(values, weights=np.ones((3, 2, 4, 1))),
metrics.mean(values, weights=np.ones((3, 2, 4, 1, 1))),)
expected = np.mean(values)
with self.test_session():
variables.local_variables_initializer().run()
for mean_result in mean_results:
mean, update_op = mean_result
self.assertAlmostEqual(expected, update_op.eval())
self.assertAlmostEqual(expected, mean.eval())
def _test_3d_weighted(self, values, weights):
expected = (
np.sum(np.multiply(weights, values)) /
np.sum(np.multiply(weights, np.ones_like(values)))
)
mean, update_op = metrics.mean(values, weights=weights)
with self.test_session():
variables.local_variables_initializer().run()
self.assertAlmostEqual(expected, update_op.eval(), places=5)
self.assertAlmostEqual(expected, mean.eval(), places=5)
def test1x1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5,)).reshape((1, 1, 1)))
def test1x1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)))
def test1xNx1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 11)).reshape((1, 2, 1)))
def test1xNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)))
def testNx1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)))
def testNx1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)))
def testNxNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)))
def testInvalidWeights(self):
values_placeholder = array_ops.placeholder(dtype=dtypes_lib.float32)
values = _test_values((3, 2, 4, 1))
invalid_weights = (
(1,),
(1, 1),
(3, 2),
(2, 4, 1),
(4, 2, 4, 1),
(3, 3, 4, 1),
(3, 2, 5, 1),
(3, 2, 4, 2),
(1, 1, 1, 1, 1))
expected_error_msg = 'weights can not be broadcast to values'
for invalid_weight in invalid_weights:
# Static shapes.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
metrics.mean(values, invalid_weight)
# Dynamic shapes.
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
with self.test_session():
_, update_op = metrics.mean(values_placeholder, invalid_weight)
variables.local_variables_initializer().run()
update_op.eval(feed_dict={values_placeholder: values})
class MeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_tensor(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/total_tensor:0',
'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class AccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_local_variables(self, ('my_accuracy/count:0',
'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
accuracy, update_op = metrics.accuracy(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.accuracy(labels, predictions)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
def testEffectivelyEquivalentSizesWithScalarWeight(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions, weights=2.0)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions, weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.accuracy(labels, predictions, weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class PrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeightedScalar_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(labels, predictions, weights=2)
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 2.0
weighted_positives = (2.0 + 2.0) + (2.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class RecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('recall/false_negatives/count:0',
'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class AUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)))
_assert_local_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
auc, update_op = metrics.auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.auc(labels, predictions, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect_multipleLabelDtypes(self):
with self.test_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
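  # Worked example of the computation above (hypothetical numbers): for
  # predictions [0.9, 0.8, 0.3], labels [1, 0, 1] and unit weights, sorting by
  # descending prediction leaves the labels as [1, 0, 1]; the cumulative
  # true-positive rate is [0.5, 0.5, 1.0], and averaging it over the negative
  # positions gives an AUC of 0.5.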
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.test_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.auc(tf_labels,
tf_predictions,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
# Since this is only approximate, we can't expect a 6 digits match.
# Although with higher number of samples/thresholds we should see the
# accuracy improving
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class SpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_local_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
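# Illustrative sketch (a hypothetical helper, not referenced by the tests
# above): one way to derive the 8/15 expectation in testWeighted2d. It
# assumes a simple "predict positive when score > threshold" rule rather
# than the metric's exact threshold-selection logic.
def _np_weighted_specificity_at_threshold(predictions, labels, weights,
                                          threshold):
  predictions = np.asarray(predictions, dtype=np.float64)
  labels = np.asarray(labels)
  weights = np.asarray(weights, dtype=np.float64)
  negatives = labels == 0
  predicted_negative = predictions <= threshold
  tn = np.sum(weights * (negatives & predicted_negative))
  fp = np.sum(weights * (negatives & ~predicted_negative))
  # For the testWeighted2d data and any threshold in [0.2, 0.26), the
  # positive weight above the threshold is 9 + 10 = 19 (sensitivity
  # 19/40 >= 0.4), the true-negative weight is 1 + 2 + 5 = 8 and the
  # false-positive weight is 3 + 4 = 7, so specificity is 8 / 15.
  return tn / (tn + fp)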
class SensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_local_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
sensitivity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, weights=weights, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
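# Illustrative note (a hypothetical helper, not referenced by the tests
# above): the 0.675 expectation in testWeighted_multipleLabelDtypes follows
# from any threshold in [0.2, 0.25): the true-negative weight is
# 1 + 2 + 3 = 6 (specificity 6/15 = 0.4) and the positive weight above the
# threshold is 8 + 9 + 10 = 27, so sensitivity is 27/40 = 0.675. The sketch
# assumes a "score > threshold" rule, not the metric's exact selection logic.
def _np_weighted_sensitivity_at_threshold(predictions, labels, weights,
                                          threshold):
  predictions = np.asarray(predictions, dtype=np.float64)
  labels = np.asarray(labels)
  weights = np.asarray(weights, dtype=np.float64)
  positives = labels == 1
  predicted_positive = predictions > threshold
  tp = np.sum(weights * (positives & predicted_positive))
  fn = np.sum(weights * (positives & ~predicted_positive))
  return tp / (tp + fn)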
# TODO(nsilberman): Break this up into two sets of tests.
class PrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_local_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
sess.run([prec_op, rec_op])
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
sess.run([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect_multipleLabelDtypes(self):
with self.test_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.precision_at_thresholds(tf_labels, tf_predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(tf_labels, tf_predictions,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds we should see
      # the accuracy improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
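# A vectorized NumPy equivalent of the per-sample tp/fp/fn counting loop in
# PrecisionRecallThresholdsTest.testWithMultipleUpdates above. This is an
# illustrative sketch only (the function is not referenced by the tests).
def _np_precision_recall_at_threshold(predictions, labels, threshold,
                                      epsilon=1e-7):
  predictions = np.asarray(predictions, dtype=np.float64)
  labels = np.asarray(labels)
  predicted_positive = predictions > threshold
  actual_positive = labels == 1
  tp = np.sum(predicted_positive & actual_positive)
  fp = np.sum(predicted_positive & ~actual_positive)
  fn = np.sum(~predicted_positive & actual_positive)
  precision = tp / (epsilon + tp + fp)
  recall = tp / (epsilon + tp + fn)
  return precision, recall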
def _test_sparse_precision_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_sparse_average_precision_at_k(predictions,
labels,
k,
expected,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.sparse_average_precision_at_k(
labels, predictions, k, weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertAlmostEqual(expected, update.eval())
test_case.assertAlmostEqual(expected, metric.eval())
class SingleLabelSparsePrecisionTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_sparse_precision_at_k = functools.partial(
_test_sparse_precision_at_k, test_case=self)
self._test_sparse_average_precision_at_k = functools.partial(
_test_sparse_average_precision_at_k, test_case=self)
def test_at_k1_nan(self):
for labels in self._labels:
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_sparse_precision_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
def test_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2)
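# Illustrative sketch of the arithmetic behind SingleLabelSparsePrecisionTest
# above (an assumption about how the expected values were derived, not the
# implementation of metrics.sparse_precision_at_k): with k=1 each row
# predicts its arg-max class, and precision is correct predictions over
# total predictions, optionally restricted to one class_id (NaN when that
# class is never predicted). The helper is hypothetical and unused.
def _np_precision_at_1(predictions, class_labels, class_id=None):
  top_1 = np.argmax(np.asarray(predictions, dtype=np.float64), axis=1)
  class_labels = np.asarray(class_labels)
  mask = (np.ones_like(top_1, dtype=bool) if class_id is None
          else top_1 == class_id)
  if not np.any(mask):
    return float('nan')
  # E.g. the setUp data predicts class 3 for both rows against labels
  # (3, 2), so precision is 1/2 both overall and for class_id=3.
  return float(np.sum((top_1 == class_labels) & mask)) / float(np.sum(mask))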
class MultiLabelSparsePrecisionTest(test.TestCase):
def setUp(self):
self._test_sparse_precision_at_k = functools.partial(
_test_sparse_precision_at_k, test_case=self)
self._test_sparse_average_precision_at_k = functools.partial(
_test_sparse_average_precision_at_k, test_case=self)
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
# Both examples, we expect both precision and average precision to be the
# average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
# Weighted examples, we expect streaming average precision to be the
# weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
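# Illustrative reference for test_average_precision above (an assumption
# about the formula those expected values encode, not the implementation of
# metrics.sparse_average_precision_at_k): average precision at k is taken as
# (1/k) * sum over the top-k positions i of precision@i * relevance(i). The
# helper is hypothetical and unused by the tests.
def _np_average_precision_at_k(predictions, label_set, k):
  predictions = np.asarray(predictions, dtype=np.float64)
  top_k = np.argsort(-predictions)[:k]
  relevant = np.array([1.0 if i in set(label_set) else 0.0 for i in top_k])
  precisions = np.cumsum(relevant) / np.arange(1, k + 1, dtype=np.float64)
  # E.g. predictions_ex1 with labels_ex1 at k=4 gives
  # (0 + 1/2 + 0 + 2/4) / 4 = 0.25, matching avg_precision_ex1[3].
  return np.sum(precisions * relevant) / k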
def _test_recall_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
class SingleLabelRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
def test_at_k1_nan(self):
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in self._labels:
for class_id in (-1, 0, 1, 4):
self._test_recall_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
def test_at_k1_no_predictions(self):
for labels in self._labels:
# Class 2: 0 predictions.
self._test_recall_at_k(
self._predictions, labels, k=1, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_recall_at_k(
self._predictions, labels, k=1, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_recall_at_k(self._predictions, labels, k=1, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = self._predictions
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
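# Illustrative sketch of the "all classes" weighted recall@1 cases above (an
# assumption about the arithmetic, not the metric implementation): with one
# label per example and top-1 predictions (3, 3) against labels (3, 2), only
# the first example is a hit, so recall is w0 / (w0 + w1), e.g. 2/5 for
# weights (2.0, 3.0) and 0.3/0.9 for weights (0.3, 0.6). The helper is
# hypothetical and unused by the tests.
def _np_weighted_recall_at_1(class_labels, top_1_classes, weights):
  hits = np.array([1.0 if label == pred else 0.0
                   for label, pred in zip(class_labels, top_1_classes)])
  weights = np.asarray(weights, dtype=np.float64)
  return np.sum(weights * hits) / np.sum(weights)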
class MultiLabel2dRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
indicator_labels = ((0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
(0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
class_labels = ((2, 7, 8), (1, 2, 5))
# Sparse vs dense labels should be handled the same.
self._labels = (_binary_2d_label_to_2d_sparse_value(indicator_labels),
np.array(
class_labels, dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
def test_at_k5_nan(self):
for labels in self._labels:
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_at_k5_no_predictions(self):
for labels in self._labels:
# Class 8: 1 label, no predictions.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=8)
def test_at_k5(self):
for labels in self._labels:
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correctly predicted in the top 5.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 6)
def test_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correctly predicted in the top 5.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 8)
class MultiLabel3dRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = (((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)),
((0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
(0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)))
# Note: We don't test dense labels here, since examples have different
# numbers of labels.
self._labels = _binary_3d_label_to_sparse_value(((
(0, 0, 1, 0, 0, 0, 0, 1, 1, 0), (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)), (
(0, 1, 1, 0, 0, 1, 0, 1, 0, 0), (0, 0, 1, 0, 0, 0, 0, 0, 1, 0))))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
def test_3d_nan(self):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=0.0, class_id=class_id)
def test_3d(self):
# Class 2: 4 labels, all correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=7.0 / 12)
def test_3d_ignore_all(self):
for class_id in xrange(10):
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
class MeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_absolute_error/count:0',
'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_absolute_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_absolute_error(labels, predictions, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
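# A minimal NumPy cross-check for testSingleUpdateWithErrorAndWeights above
# (a hypothetical helper, not used by the test): the weighted mean absolute
# error is sum(w * |p - l|) / sum(w) = (1 + 5) / 2 = 3 for the weights
# (0, 1, 0, 1) used there.
def _np_weighted_mae(predictions, labels, weights):
  predictions, labels, weights = (np.asarray(x, dtype=np.float64)
                                  for x in (predictions, labels, weights))
  return np.sum(weights * np.abs(predictions - labels)) / np.sum(weights)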
class MeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_relative_error/count:0',
'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.mean_relative_error(labels, predictions,
normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=array_ops.zeros_like(labels))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_squared_error/count:0',
'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_squared_error(labels, predictions, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.mean_squared_error(
labels0, predictions0, name='msd0')
mse1, update_op1 = metrics.mean_squared_error(
labels1, predictions1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.mean_absolute_error(labels, predictions)
mse, ms_update_op = metrics.mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
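# Worked arithmetic for the 208.0 / 6 and 32.0 / 6 expectations above (an
# illustrative note, not part of the tests): batch one contributes squared
# errors 81 + 25 + 16 = 122 and absolute errors 9 + 5 + 4 = 18; batch two
# contributes 36 + 1 + 49 = 86 and 6 + 1 + 7 = 14. Over all 6 elements that
# gives MSE (122 + 86) / 6 = 208 / 6 and MAE (18 + 14) / 6 = 32 / 6. The
# helper below is hypothetical and unused.
def _np_mse_and_mae(predictions, labels):
  diffs = (np.asarray(predictions, dtype=np.float64) -
           np.asarray(labels, dtype=np.float64))
  return np.mean(diffs ** 2), np.mean(np.abs(diffs))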
class RootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('root_mean_squared_error/count:0',
'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.root_mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.root_mean_squared_error(labels, predictions,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
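# Illustrative cross-check for testSingleUpdateWithErrorAndWeights above (a
# hypothetical helper, not used by the test): with weights (0, 1, 0, 1) the
# weighted mean squared error is (1 + 25) / 2 = 13, so the expected RMSE is
# sqrt(13).
def _np_weighted_rmse(predictions, labels, weights):
  predictions, labels, weights = (np.asarray(x, dtype=np.float64)
                                  for x in (predictions, labels, weights))
  mse = np.sum(weights * (predictions - labels) ** 2) / np.sum(weights)
  return math.sqrt(mse)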
def _reweight(predictions, labels, weights):
return (np.concatenate([[p] * int(w) for p, w in zip(predictions, weights)]),
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
class MeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_local_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.percentage_below(values=array_ops.ones((10,)), threshold=2)
_assert_local_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(values, 100, name='high')
pcnt1, update_op1 = metrics.percentage_below(values, 7, name='medium')
pcnt2, update_op2 = metrics.percentage_below(values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.percentage_below(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.percentage_below(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class MeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.mean_iou(labels, predictions, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.mean_iou(labels, predictions, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
mean_iou, update_op = metrics.mean_iou(
labels, predictions, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_mean_iou = mean_iou.eval()
for _ in range(10):
self.assertEqual(initial_mean_iou, mean_iou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
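      # With predictions [0, 1, 2, 1, 0] and labels [0, 1, 1, 2, 1], the per-class
      # IoU (intersection over union) is 1/2, 1/4 and 0 for classes 0, 1 and 2.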
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
mean_iou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
variables.local_variables_initializer().run()
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, mean_iou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
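      # Per-class IoU from the confusion matrix [[3, 0], [2, 5]] (rows are labels,
      # columns are predictions): class 0 -> 3 / (3 + 2) = 3/5, class 1 -> 5 / (5 + 2) = 5/7.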
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
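      # The zero weights drop the first and last samples; the remaining confusion
      # matrix [[2, 0], [2, 4]] gives IoU 2/4 for class 0 and 4/6 for class 1.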
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
class MeanPerClassAccuracyTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_accuracy/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_accuracy, _ = metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [mean_accuracy])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.mean_per_class_accuracy(labels, predictions, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.mean_per_class_accuracy(
labels, predictions, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_mean_accuracy = mean_accuracy.eval()
for _ in range(10):
self.assertEqual(initial_mean_accuracy, mean_accuracy.eval())
  def testMultipleUpdates(self):
    num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0, 1.0 / 3.0, 0.0])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes, weights=weights)
variables.local_variables_initializer().run()
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 2.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 2.0 / 3.0, 0.])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]), constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]), constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
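      # Per-class accuracy is row-normalized: class 0 is 3/3 correct, class 1 is 5/7 correct.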
desired_mean_accuracy = np.mean([3. / 3., 5. / 7.])
self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, mean_accuracy.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., mean_accuracy.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]), constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]), constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]), constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
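      # With the first and last samples zero-weighted, class 0 is 2/2 correct and
      # class 1 is 4/6 correct.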
desired_mean_accuracy = np.mean([2. / 2., 4. / 6.])
self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval())
if __name__ == '__main__':
test.main()
| apache-2.0 | -6,854,333,944,101,162,000 | -2,732,596,660,512,762,000 | 37.251163 | 80 | 0.616917 | false |
jupierce/openshift-tools | ansible/roles/oso_zagg_deploy/filter_plugins/filters.py | 25 | 2619 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift_sso_app
'''
class FilterModule(object):
''' Custom ansible filters '''
@staticmethod
def get_running_pods(podlist_results, pod_names_to_match):
        ''' This is a filter to see which pods in a project are running.
            This filter takes the pod list results from an `oc get pods` call and a
            list of deploymentconfig names to match against.
Example:
given this:
podlist_results:
results:
- items:
- status:
phase: Running
metadata:
labels:
deploymentconfig: oso-memcached-sso1
- status:
phase: Terminated
metadata:
labels:
deploymentconfig: oso-memcached-sso2
- status:
phase: Running
metadata:
labels:
deploymentconfig: oso-saml-sso
- status:
phase: Running
metadata:
labels:
deploymentconfig: oso-saml-sso
Then
              {{ podlist_results | get_running_pods(['oso-memcached-sso1', 'oso-memcached-sso2', 'oso-saml-sso']) }}
gives an output of ['oso-memcached-sso1', 'oso-saml-sso', 'oso-saml-sso']
'''
rval = []
if 'results' not in podlist_results:
return rval
if len(podlist_results['results']) == 0:
return rval
if 'items' not in podlist_results['results'][0]:
return rval
for pod in podlist_results['results'][0]['items']:
if 'status' not in pod:
continue
if 'phase' not in pod['status']:
continue
if pod['status']['phase'] != 'Running':
continue
if 'metadata' not in pod or 'labels' not in pod['metadata']:
continue
if 'deploymentconfig' not in pod['metadata']['labels']:
continue
if pod['metadata']['labels']['deploymentconfig'] in pod_names_to_match:
rval.append(pod['metadata']['labels']['deploymentconfig'])
return rval
def filters(self):
''' returns a mapping of filters to methods '''
return {
"get_running_pods": self.get_running_pods,
}
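# A minimal sketch of exercising the filter outside of Ansible, assuming a hypothetical
# `fake_results` structure shaped like the `oc get pods` output described in the docstring.
if __name__ == '__main__':
    fake_results = {
        'results': [{
            'items': [
                {'status': {'phase': 'Running'},
                 'metadata': {'labels': {'deploymentconfig': 'oso-saml-sso'}}},
                {'status': {'phase': 'Terminated'},
                 'metadata': {'labels': {'deploymentconfig': 'oso-memcached-sso2'}}},
            ]
        }]
    }
    # Expected output: ['oso-saml-sso']
    print(FilterModule.get_running_pods(fake_results,
                                        ['oso-saml-sso', 'oso-memcached-sso2']))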
| apache-2.0 | -1,474,972,701,739,416,600 | 3,806,029,959,422,391,000 | 33.012987 | 116 | 0.457808 | false |
deepcell/xhtml2pdf | xhtml2pdf/document.py | 37 | 6381 | # -*- coding: utf-8 -*-
from xhtml2pdf.context import pisaContext
from xhtml2pdf.default import DEFAULT_CSS
from xhtml2pdf.parser import pisaParser
from reportlab.platypus.flowables import Spacer
from reportlab.platypus.frames import Frame
from xhtml2pdf.xhtml2pdf_reportlab import PmlBaseDoc, PmlPageTemplate
from xhtml2pdf.util import pisaTempFile, getBox, pyPdf
import cgi
import logging
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log = logging.getLogger("xhtml2pdf")
def pisaErrorDocument(dest, c):
out = pisaTempFile(capacity=c.capacity)
out.write("<p style='background-color:red;'><strong>%d error(s) occured:</strong><p>" % c.err)
for mode, line, msg, _ in c.log:
if mode=="error":
out.write("<pre>%s in line %d: %s</pre>" % (mode, line, cgi.escape(msg)))
out.write("<p><strong>%d warning(s) occured:</strong><p>" % c.warn)
for mode, line, msg, _ in c.log:
if mode=="warning":
out.write("<p>%s in line %d: %s</p>" % (mode, line, cgi.escape(msg)))
return pisaDocument(out.getvalue(), dest, raise_exception=False)
def pisaStory(src, path=None, link_callback=None, debug=0, default_css=None,
xhtml=False, encoding=None, context=None, xml_output=None,
**kw):
# Prepare Context
if not context:
context = pisaContext(path, debug=debug)
context.pathCallback = link_callback
# Use a default set of CSS definitions to get an expected output
if default_css is None:
default_css = DEFAULT_CSS
# Parse and fill the story
pisaParser(src, context, default_css, xhtml, encoding, xml_output)
# Avoid empty documents
if not context.story:
context.story = [Spacer(1,1)]
if context.indexing_story:
context.story.append(context.indexing_story)
# Remove anchors if they do not exist (because of a bug in Reportlab)
for frag, anchor in context.anchorFrag:
if anchor not in context.anchorName:
frag.link = None
return context
def pisaDocument(src, dest=None, path=None, link_callback=None, debug=0,
default_css=None, xhtml=False, encoding=None, xml_output=None,
raise_exception=True, capacity=100*1024, **kw):
log.debug("pisaDocument options:\n src = %r\n dest = %r\n path = %r\n link_callback = %r\n xhtml = %r",
src,
dest,
path,
link_callback,
xhtml)
# Prepare simple context
context = pisaContext(path, debug=debug, capacity=capacity)
context.pathCallback = link_callback
# Build story
context = pisaStory(src, path, link_callback, debug, default_css, xhtml,
encoding, context=context, xml_output=xml_output)
# Buffer PDF into memory
out = pisaTempFile(capacity=context.capacity)
doc = PmlBaseDoc(
out,
pagesize=context.pageSize,
author=context.meta["author"].strip(),
subject=context.meta["subject"].strip(),
keywords=[x.strip() for x in
context.meta["keywords"].strip().split(",") if x],
title=context.meta["title"].strip(),
showBoundary=0,
allowSplitting=1)
# Prepare templates and their frames
if "body" in context.templateList:
body = context.templateList["body"]
del context.templateList["body"]
else:
x, y, w, h = getBox("1cm 1cm -1cm -1cm", context.pageSize)
body = PmlPageTemplate(
id="body",
frames=[
Frame(x, y, w, h,
id="body",
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0)],
pagesize = context.pageSize)
doc.addPageTemplates([body] + context.templateList.values())
# Use multibuild e.g. if a TOC has to be created
if context.multiBuild:
doc.multiBuild(context.story)
else:
doc.build(context.story)
# Add watermarks
if pyPdf:
for bgouter in context.pisaBackgroundList:
# If we have at least one background, then lets do it
if bgouter:
istream = out
output = pyPdf.PdfFileWriter()
input1 = pyPdf.PdfFileReader(istream)
ctr = 0
# TODO: Why do we loop over the same list again?
# see bgouter at line 137
for bg in context.pisaBackgroundList:
page = input1.getPage(ctr)
if (bg and not bg.notFound()
and (bg.mimetype=="application/pdf")):
bginput = pyPdf.PdfFileReader(bg.getFile())
pagebg = bginput.getPage(0)
pagebg.mergePage(page)
page = pagebg
else:
log.warn(context.warning(
"Background PDF %s doesn't exist.", bg))
output.addPage(page)
ctr += 1
out = pisaTempFile(capacity=context.capacity)
output.write(out)
# data = sout.getvalue()
                # Found a background? So leave loop after first occurrence
break
else:
log.warn(context.warning("pyPDF not installed!"))
# Get the resulting PDF and write it to the file object
# passed from the caller
if dest is None:
# No output file was passed - Let's use a pisaTempFile
dest = pisaTempFile(capacity=context.capacity)
context.dest = dest
data = out.getvalue() # TODO: That load all the tempfile in RAM - Why bother with a swapping tempfile then?
context.dest.write(data) # TODO: context.dest is a tempfile as well...
return context
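# A minimal usage sketch (not part of the library API), assuming "input.html" is a
# hypothetical local file; pisaDocument returns a pisaContext whose `err` attribute
# counts conversion errors.
if __name__ == "__main__":
    with open("input.html", "r") as src, open("output.pdf", "wb") as dst:
        result = pisaDocument(src.read(), dest=dst)
        if result.err:
            log.warn("conversion finished with %d error(s)", result.err)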
| apache-2.0 | -6,236,947,578,721,935,000 | 2,786,168,469,317,869,000 | 35.884393 | 112 | 0.604764 | false |
Skyeouyang/Text-Analytics-Project | lexicon analysis.py | 1 | 2398 | #######################################
##Author Skye Ouyang
##Date 19th Apr.
#######################################
import glob
import os
def IsNotNull(value):
return value is not None and len(value) > 0
#create weapon list
dict_weapon = []
weapons = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/weapon_words.txt','r')
for weapon in weapons:
t = weapon.strip().lower()
if (IsNotNull(t)):
dict_weapon.append(t)
weapons.close()
#create bloody words list
dict_bloody = []
bloodys = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/bloody_words.txt','r')
for bloody in bloodys:
b = bloody.strip().lower()
if (IsNotNull(b)):
dict_bloody.append(b)
#create mysterious words list
dict_mysterious = []
mysteriouss = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/mysterious_words.txt','r')
for mysterious in mysteriouss:
m = mysterious.strip().lower()
if (IsNotNull(m)):
dict_mysterious.append(m)
#input data
path ="D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/low_score_novel"
allFiles = glob.glob(path + "/*.txt")
#file = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/high_score_novel/01. The Girl with the Dragon Tattoo.txt','r')
weapon_cnt = []
bloody_cnt = []
mysterious_cnt = []
for file in allFiles:
with open(file) as fle:
fiction = fle.read()
    # reset the counters for this novel
wea_cnt = 0
blo_cnt = 0
mys_cnt = 0
# count of weapon words
for word in dict_weapon:
if (word in fiction):
wea_cnt = wea_cnt + 1
for word in dict_bloody:
if (word in fiction):
blo_cnt = blo_cnt + 1
for word in dict_mysterious:
if (word in fiction):
mys_cnt = mys_cnt + 1
print (wea_cnt, blo_cnt , mys_cnt)
# write into list
weapon_cnt.append(wea_cnt)
bloody_cnt.append(blo_cnt)
mysterious_cnt.append(mys_cnt)
weapon_cnt
'''
for file in allFiles:
with open (file) as fle:
blo_cnt = 0
fiction = fle.read()
'''
#file_name = os.path.splitext(path + '/*.txt')[0]
#print ('The size of %s is ' % (file_name) + str(len(fiction)))
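# A minimal alternative sketch (not used above): count every occurrence of each
# weapon word in one novel's text instead of only noting whether it appears.
# Assumes `fiction` still holds a novel's text and dict_weapon is the list built above.
#
# wea_total = sum(fiction.lower().count(w) for w in dict_weapon)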
| apache-2.0 | 3,036,640,632,361,981,400 | 1,724,574,684,088,596,700 | 27.604938 | 149 | 0.582569 | false |
ueshin/apache-spark | python/pyspark/sql/context.py | 15 | 23877 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, _NoValue
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.udf import UDFRegistration # noqa: F401
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sparkContext : :class:`SparkContext`
The :class:`SparkContext` backing this SQLContext.
sparkSession : :class:`SparkSession`
The :class:`SparkSession` around which this SQLContext wraps.
jsqlContext : optional
An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
        This is only for internal use.
Examples
--------
>>> from datetime import datetime
>>> from pyspark.sql import Row
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + 1)=2, (d + 1)=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, 'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
_instantiatedContext = None
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
if sparkSession is None:
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
FutureWarning
)
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if (SQLContext._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
.. versionadded:: 1.6.0
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sc : :class:`SparkContext`
"""
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
FutureWarning
)
if (cls._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
jsqlContext = sc._jvm.SparkSession.builder().sparkContext(
sc._jsc.sc()).getOrCreate().sqlContext()
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
.. versionadded:: 1.6.0
"""
return self.__class__(self._sc, self.sparkSession.newSession())
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
.. versionadded:: 1.3.0
"""
self.sparkSession.conf.set(key, value)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", "50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
.. versionadded:: 1.3.1
Returns
-------
:class:`UDFRegistration`
"""
return self.sparkSession.udf
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
.. versionadded:: 1.4.0
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numPartitions : int, optional
the number of partitions of the DataFrame
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. versionadded:: 1.2.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
FutureWarning
)
return self.sparkSession.udf.register(name, f, returnType)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. versionadded:: 2.1.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
FutureWarning
)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
Parameters
----------
rdd : :class:`RDD`
an RDD of Row or tuple
samplingRatio : float, optional
sampling ratio, or no sampling (default)
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
.. versionadded:: 1.3.0
.. versionchanged:: 2.0.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1.0
Added verifySchema.
Parameters
----------
data : :class:`RDD` or iterable
an RDD of any kind of SQL data representation (:class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType`, str or list, optional
a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
samplingRatio : float, optional
the sample ratio of rows used for inferring
verifySchema : bool, optional
verify data types of every row against schema. Enabled by default.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1='Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name='Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name='Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1='Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a='Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
def dropTempTable(self, tableName):
""" Remove the temporary table from catalog.
.. versionadded:: 1.6.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
.. versionadded:: 1.3.0
Returns
-------
:class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2='row1'), Row(f1=2, f2='row2'), Row(f1=3, f2='row3')]
"""
return self.sparkSession.sql(sqlQuery)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
.. versionadded:: 1.3.0
Parameters
----------
dbName: str, optional
name of the database to use.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(namespace='', tableName='table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
.. versionadded:: 1.3.0
Parameters
----------
dbName: str
            name of the database to use. Defaults to the current database.
Returns
-------
list
            list of table names, as strings
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
.. deprecated:: 2.0.0
Use SparkSession.builder.enableHiveSupport().getOrCreate().
Parameters
----------
sparkContext : :class:`SparkContext`
The SparkContext to wrap.
jhiveContext : optional
An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
This is only for internal use.
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
FutureWarning
)
if jhiveContext is None:
sparkContext._conf.set("spark.sql.catalogImplementation", "hive")
sparkSession = SparkSession.builder._sparkContext(sparkContext).getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encounter incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -418,326,753,385,891,000 | -1,920,893,840,104,205,800 | 34.478455 | 100 | 0.604389 | false |
onceuponatimeforever/oh-mainline | vendor/packages/gdata/samples/contentforshopping/add_product.py | 32 | 1571 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
from gdata.contentforshopping.data import build_entry
from gdata.contentforshopping.client import ContentForShoppingClient
# Gather merchant information
account_id = raw_input('Merchant Account ID? ').strip()
email = raw_input('Google Email Address? ').strip()
# Create a client
client = ContentForShoppingClient(account_id)
# Perform programmatic login
client.client_login(email, getpass.getpass('Google Password? '),
'Shopping API for Content sample', 'structuredcontent')
# Generate a product entry
product_entry = build_entry(
product_id='ipod2',
target_country = 'US',
content_language = 'EN',
title='iPod Nano 8GB',
content='A nice small mp3 player',
price='149',
price_unit='USD',
shipping_price = '5',
shipping_price_unit = 'USD',
tax_rate='17.5',
condition = 'new',
link = 'http://pseudoscience.co.uk/google4e823e35f032f011.html',
)
# Post it to the service
client.insert_product(product_entry)
| agpl-3.0 | -1,388,991,226,745,280,000 | 7,335,092,151,102,545,000 | 30.42 | 74 | 0.742839 | false |
gandalfcode/gandalf | examples/example09.py | 1 | 1749 | #==============================================================================
# example09.py
# Create initial conditions for pure N-body simulation inside the python
# script, and then run the simulation to completion while plotting results.
#==============================================================================
from gandalf.analysis.facade import *
import numpy as np
import time
# Create empty numpy arrays for setting star initial conditions
Nstar = 3
x = np.zeros(Nstar)
y = np.zeros(Nstar)
vx = np.zeros(Nstar)
vy = np.zeros(Nstar)
m = np.zeros(Nstar)
h = 0.000001*np.ones(Nstar)
# Set values for each star individually (Note all velocities initially zero)
m[0] = 3.0; x[0] = 1.0; y[0] = 3.0
m[1] = 4.0; x[1] = -2.0; y[1] = -1.0
m[2] = 5.0; x[2] = 1.0; y[2] = -1.0
# Create new 1D simulation object and set parameters
sim = newsim(ndim=2,sim='nbody')
sim.SetParam('ic','python')
sim.SetParam('nbody','hermite4ts')
sim.SetParam('sub_systems',0)
sim.SetParam('Npec',3)
sim.SetParam('Nlevels',1)
sim.SetParam('Nstar',Nstar)
sim.SetParam('tend',80.0)
sim.SetParam('dt_snap',1.0)
sim.SetParam('noutputstep',128)
sim.SetParam('ndiagstep',2048)
sim.SetParam('dimensionless',1)
sim.SetParam('run_id','BURRAU1')
sim.SetParam('out_file_form','su')
# Call setup routines and import particle data
sim.PreSetupForPython()
sim.ImportArray(x,'x','star')
sim.ImportArray(y,'y','star')
sim.ImportArray(vx,'vx','star')
sim.ImportArray(vy,'vy','star')
sim.ImportArray(m,'m','star')
sim.ImportArray(h,'h','star')
sim.SetupSimulation()
# Plot the density of all particles near the shock
plot("x","y",type="star")
limit("x",-30.0,30.0,window="all")
limit("y",-20.0,40.0,window="all")
# Run simulation to completion and keep the plot window open
run()
block()
| gpl-2.0 | 3,827,408,456,027,968,000 | -551,328,706,968,777,900 | 29.684211 | 79 | 0.63522 | false |
swdream/neutron | neutron/tests/unit/api/v2/test_resource.py | 28 | 14954 | # Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_i18n
from webob import exc
import webtest
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.tests import base
from neutron import wsgi
class RequestTestCase(base.BaseTestCase):
def setUp(self):
super(RequestTestCase, self).setUp()
self.req = wsgi_resource.Request({'foo': 'bar'})
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept(self):
content_type = 'application/json'
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(result, content_type)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_context_with_neutron_context(self):
ctxt = context.Context('fake_user', 'fake_tenant')
self.req.environ['neutron.context'] = ctxt
self.assertEqual(self.req.context, ctxt)
def test_context_without_neutron_context(self):
self.assertTrue(self.req.context.is_admin)
def test_request_context_elevated(self):
user_context = context.Context(
'fake_user', 'fake_project', admin=False)
self.assertFalse(user_context.is_admin)
admin_context = user_context.elevated()
self.assertFalse(user_context.is_admin)
self.assertTrue(admin_context.is_admin)
self.assertNotIn('admin', user_context.roles)
self.assertIn('admin', admin_context.roles)
def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
oslo_i18n.get_available_languages = mock.MagicMock()
oslo_i18n.get_available_languages.return_value = ['known-language',
'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual(language, 'known-language')
        # If the Accept-Language header is an unknown language, missing or empty,
# the best match locale should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertIsNone(language)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertIsNone(language)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertIsNone(language)
class ResourceTestCase(base.BaseTestCase):
@staticmethod
def _get_deserializer():
return wsgi.JSONDeserializer()
def test_unmapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
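        # A non-ASCII (Chinese) message exercises unicode handling in the
        # serialized error body.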
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
@mock.patch('oslo_i18n.translate')
def test_unmapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
def test_mapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
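        # The faults map tells the Resource to translate this exception into
        # the mapped webob error (a 504 here) instead of a generic 500.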
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
@mock.patch('oslo_i18n.translate')
def test_mapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
@staticmethod
def _make_request_with_side_effect(side_effect):
controller = mock.MagicMock()
controller.test.side_effect = side_effect
resource = webtest.TestApp(wsgi_resource.Resource(controller))
routing_args = {'action': 'test'}
environ = {'wsgiorg.routing_args': (None, routing_args)}
res = resource.get('', extra_environ=environ, expect_errors=True)
return res
def test_http_error(self):
res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout())
# verify that the exception structure is the one expected
# by the python-neutronclient
self.assertEqual(exc.HTTPGatewayTimeout().explanation,
res.json['NeutronError']['message'])
self.assertEqual('HTTPGatewayTimeout',
res.json['NeutronError']['type'])
self.assertEqual('', res.json['NeutronError']['detail'])
self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
def test_unhandled_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'Request Failed: internal server '
'error while processing your request.'),
'type': 'HTTPInternalServerError'}}}
res = self._make_request_with_side_effect(side_effect=Exception())
self.assertEqual(exc.HTTPInternalServerError.code,
res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_not_implemented_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'The server has either erred or is '
'incapable of performing the requested '
'operation.'),
'type': 'HTTPNotImplemented'}}}
res = self._make_request_with_side_effect(exc.HTTPNotImplemented())
self.assertEqual(exc.HTTPNotImplemented.code, res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_status_200(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.get('', extra_environ=environ)
self.assertEqual(res.status_int, 200)
def test_status_204(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
res = resource.delete('', extra_environ=environ)
self.assertEqual(res.status_int, 204)
def _test_error_log_level(self, expected_webob_exc, expect_log_info=False,
use_fault_map=True, exc_raised=None):
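        # Raise the given exception from the controller and verify both the
        # resulting HTTP status and the log level used: info() is expected for
        # client (4xx) errors, exception() for everything else.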
if not exc_raised:
class TestException(n_exc.NeutronException):
message = 'Test Exception'
exc_raised = TestException
controller = mock.MagicMock()
controller.test.side_effect = exc_raised()
faults = {exc_raised: expected_webob_exc} if use_fault_map else {}
resource = webtest.TestApp(wsgi_resource.Resource(controller, faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
with mock.patch.object(wsgi_resource, 'LOG') as log:
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, expected_webob_exc.code)
self.assertEqual(expect_log_info, log.info.called)
self.assertNotEqual(expect_log_info, log.exception.called)
def test_4xx_error_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True)
def test_non_4xx_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
expect_log_info=False)
def test_unmapped_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPInternalServerError,
expect_log_info=False, use_fault_map=False)
def test_webob_4xx_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound,
use_fault_map=False, expect_log_info=True,
exc_raised=exc.HTTPNotFound)
def test_webob_5xx_logged_info_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
use_fault_map=False, expect_log_info=False,
exc_raised=exc.HTTPServiceUnavailable)
def test_no_route_args(self):
controller = mock.MagicMock()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
def test_post_with_body(self):
controller = mock.MagicMock()
controller.test = lambda request, body: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.post('', params='{"key": "val"}',
extra_environ=environ)
self.assertEqual(res.status_int, 200)
| apache-2.0 | -5,978,935,368,970,101,000 | -7,739,448,915,041,990,000 | 41.848138 | 78 | 0.605724 | false |
gunzy83/ansible-modules-extras | monitoring/datadog_event.py | 33 | 5325 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Artūras 'arturaz' Šlajus <[email protected]>
# Author: Naoya Nakazawa <[email protected]>
#
# This module is proudly sponsored by iGeolise (www.igeolise.com) and
# Tiny Lab Productions (www.tinylabproductions.com).
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Import Datadog
try:
from datadog import initialize, api
HAS_DATADOG = True
except:
HAS_DATADOG = False
DOCUMENTATION = '''
---
module: datadog_event
short_description: Posts events to DataDog service
description:
- "Allows to post events to DataDog (www.datadoghq.com) service."
- "Uses http://docs.datadoghq.com/api/#events API."
version_added: "1.3"
author:
- "Artūras `arturaz` Šlajus (@arturaz)"
- "Naoya Nakazawa (@n0ts)"
notes: []
requirements: []
options:
api_key:
description: ["Your DataDog API key."]
required: true
default: null
app_key:
description: ["Your DataDog app key."]
required: true
version_added: "2.2"
title:
description: ["The event title."]
required: true
default: null
text:
description: ["The body of the event."]
required: true
default: null
date_happened:
description:
- POSIX timestamp of the event.
- Default value is now.
required: false
default: now
priority:
description: ["The priority of the event."]
required: false
default: normal
choices: [normal, low]
tags:
description: ["Comma separated list of tags to apply to the event."]
required: false
default: null
alert_type:
description: ["Type of alert."]
required: false
default: info
choices: ['error', 'warning', 'info', 'success']
aggregation_key:
description: ["An arbitrary string to use for aggregation."]
required: false
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Post an event with low priority
datadog_event:
    title: "Testing from ansible"
    text: "Test!"
    priority: "low"
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"

# Post an event with several tags
datadog_event:
    title: "Testing from ansible"
    text: "Test!"
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN"
    tags: "aa,bb,#host:{{ inventory_hostname }}"
'''
# Import Datadog
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
app_key=dict(required=True, no_log=True),
title=dict(required=True),
text=dict(required=True),
date_happened=dict(required=False, default=None, type='int'),
priority=dict(
required=False, default='normal', choices=['normal', 'low']
),
tags=dict(required=False, default=None, type='list'),
alert_type=dict(
required=False, default='info',
choices=['error', 'warning', 'info', 'success']
),
aggregation_key=dict(required=False, default=None),
validate_certs = dict(default='yes', type='bool'),
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg='datadogpy required for this module')
options = {
'api_key': module.params['api_key'],
'app_key': module.params['app_key']
}
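    # initialize() stores the credentials on the shared datadog client so the
    # api.Event call below can authenticate.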
initialize(**options)
_post_event(module)
def _post_event(module):
try:
msg = api.Event.create(title=module.params['title'],
text=module.params['text'],
tags=module.params['tags'],
priority=module.params['priority'],
alert_type=module.params['alert_type'],
aggregation_key=module.params['aggregation_key'],
source_type_name='ansible')
if msg['status'] != 'ok':
module.fail_json(msg=msg)
module.exit_json(changed=True, msg=msg)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | -6,185,725,686,101,613,000 | -5,209,082,525,901,746,000 | 31.248485 | 88 | 0.606465 | false |
ciudadanointeligente/votainteligente-portal-electoral | popular_proposal/rest_api.py | 2 | 1890 | from rest_framework.serializers import (HyperlinkedModelSerializer,
JSONField,
StringRelatedField)
from rest_framework.viewsets import ReadOnlyModelViewSet
from popular_proposal.models import PopularProposal, Commitment
from elections.models import Candidate
class ProposalSerializer(HyperlinkedModelSerializer):
data = JSONField()
proposer = StringRelatedField()
class Meta:
model = PopularProposal
fields = ('id','title', 'slug', 'get_absolute_url', 'data', 'proposer','created', 'clasification','is_local_meeting','nro_supports')
class ProposalViewSet(ReadOnlyModelViewSet):
queryset = PopularProposal.objects.all()
serializer_class = ProposalSerializer
def get_queryset(self):
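        # Optional query parameters ?proposer=<username> and
        # ?clasification=<value> narrow down the returned proposals.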
queryset = super(ProposalViewSet, self).get_queryset()
username = self.request.query_params.get('proposer', None)
if username is not None:
queryset = queryset.filter(proposer__username=username)
clasification = self.request.query_params.get('clasification', None)
if clasification is not None:
queryset = queryset.filter(clasification=clasification)
return queryset
class CommitmentsSerializer(HyperlinkedModelSerializer):
class Meta:
model = Commitment
fields = ('id','proposal','candidate', 'detail', 'commited', 'get_absolute_url')
class CommitmentViewSet(ReadOnlyModelViewSet):
queryset = Commitment.objects.all()
serializer_class = CommitmentsSerializer
class CandidateSerializer(HyperlinkedModelSerializer):
class Meta:
model = Candidate
fields = ('id','name', 'get_absolute_url', 'commitments')
class CandidateViewSet(ReadOnlyModelViewSet):
queryset = Candidate.objects.all()
serializer_class = CandidateSerializer
pagination_class = None | gpl-3.0 | 1,563,690,800,908,253,200 | -7,789,034,580,205,829,000 | 36.078431 | 140 | 0.696825 | false |
ycl2045/nova-master | nova/scheduler/filters/isolated_hosts_filter.py | 13 | 3321 | # Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.scheduler import filters
isolated_opts = [
cfg.ListOpt('isolated_images',
default=[],
help='Images to run on isolated host'),
cfg.ListOpt('isolated_hosts',
default=[],
help='Host reserved for specific images'),
cfg.BoolOpt('restrict_isolated_hosts_to_isolated_images',
default=True,
help='Whether to force isolated hosts to run only isolated '
'images'),
]
CONF = cfg.CONF
CONF.register_opts(isolated_opts)
class IsolatedHostsFilter(filters.BaseHostFilter):
"""Keep specified images to selected hosts."""
# The configuration values do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
"""Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set
to True:
| isolated_image | non_isolated_image
-------------+----------------+-------------------
iso_host | True | False
non_iso_host | False | True
Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set
to False:
| isolated_image | non_isolated_image
-------------+----------------+-------------------
iso_host | True | True
non_iso_host | False | True
"""
# If the configuration does not list any hosts, the filter will always
# return True, assuming a configuration error, so letting all hosts
# through.
isolated_hosts = CONF.isolated_hosts
isolated_images = CONF.isolated_images
restrict_isolated_hosts_to_isolated_images = (CONF.
restrict_isolated_hosts_to_isolated_images)
if not isolated_images:
# As there are no images to match, return True if the filter is
# not restrictive otherwise return False if the host is in the
# isolation list.
return ((not restrict_isolated_hosts_to_isolated_images) or
(host_state.host not in isolated_hosts))
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})
image_ref = props.get('image_ref')
image_isolated = image_ref in isolated_images
host_isolated = host_state.host in isolated_hosts
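        # With the restriction enabled this is effectively an XNOR: isolated
        # images run only on isolated hosts and isolated hosts accept only
        # isolated images. With it disabled, isolated hosts may also run
        # ordinary images, but isolated images stay pinned to isolated hosts.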
if restrict_isolated_hosts_to_isolated_images:
return (image_isolated == host_isolated)
else:
return (not image_isolated) or host_isolated
| apache-2.0 | 8,661,403,271,935,237,000 | 8,555,895,742,115,964,000 | 40 | 78 | 0.601325 | false |
EricSB/nupic | tests/unit/nupic/research/spatial_pooler_boost_test.py | 15 | 11879 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import numpy
import unittest2 as unittest
from nupic.support.unittesthelpers.algorithm_test_helpers \
import CreateSP
from nupic.bindings.math import GetNTAReal
uintType = "uint32"
# set a single seed for running both implementations
SEED = int((time.time()%10000)*10)
def _computeOverlap(x, y):
"""
Given two binary arrays, compute their overlap. The overlap is the number
of bits where x[i] and y[i] are both 1
"""
return ((x + y) == 2).sum()
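# Example with hypothetical 4-bit vectors: x = [1, 1, 0, 0] and y = [1, 0, 1, 0]
# give an overlap of 1, since only bit 0 is set in both arrays.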
def _areAllSDRsUnique(sdrDict):
"""Return True iff all the SDR's in the dict are unique."""
for k1, v1 in sdrDict.iteritems():
for k2, v2 in sdrDict.iteritems():
# Return false if two different keys have identical SDR's
if (k2 != k1) and ((v1 == v2).sum() == v1.size):
return False
return True
class SpatialPoolerBoostTest(unittest.TestCase):
"""
Test boosting.
The test is constructed as follows: we construct a set of 5 known inputs. Two
of the input patterns have 50% overlap while all other combinations have 0%
overlap. Each input pattern has 20 bits on to ensure reasonable overlap with
almost all columns.
SP parameters: The SP is set to have 600 columns with 10% output sparsity.
This ensures that the 5 inputs cannot use up all the columns. Yet we still can
have a reasonable number of winning columns at each step in order to test
overlap properties. boostStrength is set to 10 so that some boosted columns are
guaranteed to win eventually but not necessarily quickly. potentialPct is set
to 0.9 to ensure all columns have at least some overlap with at least one
input bit. Thus, when sufficiently boosted, every column should become a
winner at some point. We set permanence increment and decrement to 0 so that
winning columns don't change unless they have been boosted.
Learning is OFF for Phase 1 & 4 and ON for Phase 2 & 3
Phase 1: Run spatial pooler on the dataset with learning off to get a baseline
The boosting factors should be all ones in this phase. A significant fraction
of the columns will not be used at all. There will be significant overlap
between the first two inputs.
Phase 2: Learning is on over the next 10 iterations. During this phase,
columns that are active frequently will have low boost factors, and columns
that are not active enough will have high boost factors. All columns should
be active at some point in phase 2.
Phase 3: Run one more batch on with learning On. Because of the artificially
induced thrashing behavior in this test due to boosting, all the inputs should
now have pretty distinct patterns.
Phase 4: Run spatial pooler with learning off. Make sure boosting factors
do not change when learning is off
"""
def setUp(self):
"""
Set various constants. Create the input patterns and the spatial pooler
"""
self.inputSize = 90
self.columnDimensions = 600
# Create a set of input vectors, x
# B,C,D don't overlap at all with other patterns
self.x = numpy.zeros((5, self.inputSize), dtype=uintType)
self.x[0, 0:20] = 1 # Input pattern A
self.x[1, 10:30] = 1 # Input pattern A' (half the bits overlap with A)
self.x[2, 30:50] = 1 # Input pattern B (no overlap with others)
self.x[3, 50:70] = 1 # Input pattern C (no overlap with others)
self.x[4, 70:90] = 1 # Input pattern D (no overlap with others)
# For each column, this will contain the last iteration number where that
# column was a winner
self.winningIteration = numpy.zeros(self.columnDimensions)
# For each input vector i, lastSDR[i] contains the most recent SDR output
# by the SP.
self.lastSDR = {}
self.spImplementation = "None"
self.sp = None
# Setup the SP creation parameters we will use
self.params = {
'inputDimensions': [self.inputSize],
'columnDimensions': [self.columnDimensions],
'potentialRadius': self.inputSize,
'potentialPct': 0.9,
'globalInhibition': True,
'numActiveColumnsPerInhArea': 60,
'synPermActiveInc': 0.0,
'synPermInactiveDec': 0.0,
'dutyCyclePeriod': 10,
'boostStrength': 10.0,
'seed': SEED,
}
print "SP seed set to:", self.params['seed']
def debugPrint(self):
"""
Helpful debug print statements while debugging this test.
"""
activeDutyCycle = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getActiveDutyCycles(activeDutyCycle)
boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getBoostFactors(boost)
print "\n--------- ITERATION", (
self.sp.getIterationNum() ),"-----------------------"
print "SP implementation:", self.spImplementation
print "Learning iteration:",
print "Max/min active duty cycle:", (
activeDutyCycle.max(), activeDutyCycle.min() )
print "Average non-zero active duty cycle:", (
activeDutyCycle[activeDutyCycle>0].mean() )
print "Active duty cycle", activeDutyCycle
print
print "Boost factor for sp:", boost
print
print "Last winning iteration for each column"
print self.winningIteration
print "Number of columns that have won at some point:", (
self.columnDimensions - (self.winningIteration==0).sum() )
def verifySDRProperties(self):
"""
Verify that all SDRs have the properties desired for this test.
The bounds for checking overlap are set fairly loosely here since there is
some variance due to randomness and the artificial parameters used in this
test.
"""
# Verify that all SDR's are unique
self.assertTrue(_areAllSDRsUnique(self.lastSDR), "All SDR's are not unique")
# Verify that the first two SDR's have some overlap.
self.assertGreater(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 9,
"First two SDR's don't overlap much")
# Verify the last three SDR's have low overlap with everyone else.
for i in [2, 3, 4]:
for j in range(5):
if (i!=j):
self.assertLess(_computeOverlap(self.lastSDR[i], self.lastSDR[j]),
18, "One of the last three SDRs has high overlap")
def boostTestPhase1(self):
y = numpy.zeros(self.columnDimensions, dtype = uintType)
# Do one batch through the input patterns while learning is Off
for idx, v in enumerate(self.x):
y.fill(0)
self.sp.compute(v, False, y)
self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
self.lastSDR[idx] = y.copy()
# The boost factor for all columns should be at 1.
boost = numpy.zeros(self.columnDimensions, dtype = GetNTAReal())
self.sp.getBoostFactors(boost)
self.assertEqual((boost==1).sum(), self.columnDimensions,
"Boost factors are not all 1")
# At least half of the columns should have never been active.
self.assertGreaterEqual((self.winningIteration==0).sum(),
self.columnDimensions/2, "More than half of the columns have been active")
self.verifySDRProperties()
def boostTestPhase2(self):
y = numpy.zeros(self.columnDimensions, dtype = uintType)
# Do 9 training batch through the input patterns
for _ in range(10):
for idx, v in enumerate(self.x):
y.fill(0)
self.sp.compute(v, True, y)
self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
self.lastSDR[idx] = y.copy()
# All the never-active columns should have duty cycle of 0
dutyCycles = numpy.zeros(self.columnDimensions, dtype = GetNTAReal())
self.sp.getActiveDutyCycles(dutyCycles)
self.assertEqual(dutyCycles[self.winningIteration == 0].sum(), 0,
"Inactive columns have positive duty cycle.")
boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getBoostFactors(boost)
self.assertLessEqual(numpy.max(boost[numpy.where(dutyCycles>0.1)]), 1.0,
"Strongly active columns have high boost factors")
self.assertGreaterEqual(numpy.min(boost[numpy.where(dutyCycles<0.1)]), 1.0,
"Weakly active columns have low boost factors")
# By now, every column should have been sufficiently boosted to win at least
# once. The number of columns that have never won should now be 0
numLosersAfter = (self.winningIteration == 0).sum()
self.assertEqual(numLosersAfter, 0)
# Because of the artificially induced thrashing, even the first two patterns
# should have low overlap. Verify that the first two SDR's now have little
# overlap
self.assertLess(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 7,
"First two SDR's overlap significantly when they "
"shouldn't")
def boostTestPhase3(self):
# Do one more training batches through the input patterns
y = numpy.zeros(self.columnDimensions, dtype = uintType)
for idx, v in enumerate(self.x):
y.fill(0)
self.sp.compute(v, True, y)
self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
self.lastSDR[idx] = y.copy()
# By now, every column should have been sufficiently boosted to win at least
# once. The number of columns that have never won should now be 0
numLosersAfter = (self.winningIteration==0).sum()
self.assertEqual(numLosersAfter, 0)
# Because of the artificially induced thrashing, even the first two patterns
# should have low overlap. Verify that the first two SDR's now have little
# overlap
self.assertLess(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 7,
"First two SDR's overlap significantly when they "
"shouldn't")
def boostTestPhase4(self):
boostAtBeg = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getBoostFactors(boostAtBeg)
# Do one more iteration through the input patterns with learning OFF
y = numpy.zeros(self.columnDimensions, dtype=uintType)
for _, v in enumerate(self.x):
y.fill(0)
self.sp.compute(v, False, y)
boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getBoostFactors(boost)
self.assertEqual(boost.sum(), boostAtBeg.sum(),
"Boost factors changed when learning is off")
def boostTestLoop(self, imp):
"""Main test loop."""
self.sp = CreateSP(imp, self.params)
self.spImplementation = imp
self.winningIteration.fill(0)
self.lastSDR = {}
self.boostTestPhase1()
self.boostTestPhase2()
self.boostTestPhase3()
self.boostTestPhase4()
def testBoostingPY(self):
self.boostTestLoop("py")
def testBoostingCPP(self):
self.boostTestLoop("cpp")
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 7,860,778,161,959,022,000 | -663,058,764,568,794,100 | 37.196141 | 81 | 0.672784 | false |
mrtnrdl/.macdots | scripts/bin/platform-tools/systrace/catapult/common/py_utils/py_utils/refactor/annotated_symbol/function_definition.py | 9 | 1301 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import symbol
from py_utils.refactor.annotated_symbol import base_symbol
__all__ = [
'Function',
]
class Function(base_symbol.AnnotatedSymbol):
# pylint: disable=abstract-class-not-used
@classmethod
def Annotate(cls, symbol_type, children):
if symbol_type != symbol.stmt:
return None
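    # In Python's grammar a function definition is parsed as
    # stmt -> compound_stmt -> funcdef, optionally wrapped in a 'decorated'
    # node when decorators are present; unwrap that chain here.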
compound_statement = children[0]
if compound_statement.type != symbol.compound_stmt:
return None
statement = compound_statement.children[0]
if statement.type == symbol.funcdef:
return cls(statement.type, statement.children)
elif (statement.type == symbol.decorated and
statement.children[-1].type == symbol.funcdef):
return cls(statement.type, statement.children)
else:
return None
@property
def suite(self):
# TODO: Complete.
raise NotImplementedError()
def FindChild(self, snippet_type, **kwargs):
return self.suite.FindChild(snippet_type, **kwargs)
def FindChildren(self, snippet_type):
return self.suite.FindChildren(snippet_type)
def Cut(self, child):
self.suite.Cut(child)
def Paste(self, child):
self.suite.Paste(child)
| unlicense | -639,582,723,076,108,800 | 3,646,907,679,135,625,700 | 24.509804 | 72 | 0.700999 | false |
rgerkin/neuroConstruct | lib/jython/Lib/modjy/modjy_log.py | 109 | 2133 | ###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
import java
import sys
DEBUG = 'debug'
INFO = 'info'
WARN = 'warn'
ERROR = 'error'
FATAL = 'fatal'
levels_dict = {}
ix = 0
for level in [DEBUG, INFO, WARN, ERROR, FATAL, ]:
levels_dict[level]=ix
ix += 1
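# levels_dict now maps each level name to its severity rank (DEBUG=0 up to
# FATAL=4); _log compares message levels against this rank.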
class modjy_logger:
def __init__(self, context):
self.log_ctx = context
self.format_str = "%(lvl)s:\t%(msg)s"
self.log_level = levels_dict[DEBUG]
def _log(self, level, level_str, msg, exc):
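        # Only messages at or above the configured threshold are written, via
        # the wrapped context's log() method.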
if level >= self.log_level:
msg = self.format_str % {'lvl': level_str, 'msg': msg, }
if exc:
# java.lang.System.err.println(msg, exc)
self.log_ctx.log(msg, exc)
else:
# java.lang.System.err.println(msg)
self.log_ctx.log(msg)
def debug(self, msg, exc=None):
self._log(0, DEBUG, msg, exc)
def info(self, msg, exc=None):
self._log(1, INFO, msg, exc)
def warn(self, msg, exc=None):
self._log(2, WARN, msg, exc)
def error(self, msg, exc=None):
self._log(3, ERROR, msg, exc)
def fatal(self, msg, exc=None):
self._log(4, FATAL, msg, exc)
def set_log_level(self, level_string):
try:
self.log_level = levels_dict[level_string]
except KeyError:
raise BadParameter("Invalid log level: '%s'" % level_string)
    def set_log_format(self, format_string):
        # Validate the format string against the keys used by _log before
        # adopting it, so a bad format fails here rather than at logging time.
        try:
            format_string % {'lvl': DEBUG, 'msg': 'This is a log formatting test', }
            self.format_str = format_string
        except (KeyError, ValueError):
            raise BadParameter("Bad format string: '%s'" % format_string)
| gpl-2.0 | -3,164,591,822,762,739,000 | 6,130,669,576,973,272,000 | 25.6625 | 75 | 0.593999 | false |
BizzCloud/PosBox | addons/mrp_repair/__openerp__.py | 65 | 2540 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Repairs Management',
'version': '1.0',
'category': 'Manufacturing',
'description': """
The aim is to have a complete module to manage all product repairs.
====================================================================
The following topics should be covered by this module:
------------------------------------------------------
* Add/remove products in the reparation
* Impact for stocks
* Invoicing (products and/or services)
* Warranty concept
* Repair quotation report
* Notes for the technician and for the final customer
""",
'author': 'OpenERP SA',
'images': ['images/repair_order.jpeg'],
'depends': ['mrp', 'sale', 'account'],
'data': [
'security/ir.model.access.csv',
'security/mrp_repair_security.xml',
'mrp_repair_data.xml',
'mrp_repair_sequence.xml',
'wizard/mrp_repair_cancel_view.xml',
'wizard/mrp_repair_make_invoice_view.xml',
'mrp_repair_view.xml',
'mrp_repair_workflow.xml',
'mrp_repair_report.xml',
'views/report_mrprepairorder.xml',
],
'demo': ['mrp_repair_demo.yml'],
'test': ['test/mrp_repair_users.yml',
'test/test_mrp_repair_noneinv.yml',
'test/test_mrp_repair_b4inv.yml',
'test/test_mrp_repair_afterinv.yml',
'test/test_mrp_repair_cancel.yml',
'test/test_mrp_repair_fee.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,203,438,197,864,164,000 | 320,053,470,235,664,700 | 36.910448 | 78 | 0.574803 | false |