filename (string, length 13-19) | text (string, length 134-1.04M)
---|---
the-stack_106_30357 | import Messages
import re
# Least common multiple of all possible character widths. A line wrap must occur when the combined widths of all of the
# characters on a line reach this value.
NORMAL_LINE_WIDTH = 1801800
# Attempting to display more lines in a single text box will cause additional lines to bleed past the bottom of the box.
LINES_PER_BOX = 4
# Attempting to display more characters in a single text box will cause buffer overflows. First, visual artifacts will
# appear in lower areas of the text box. Eventually, the text box will become uncloseable.
MAX_CHARACTERS_PER_BOX = 200
CONTROL_CHARS = {
'LINE_BREAK': ['&', '\x01'],
'BOX_BREAK': ['^', '\x04'],
'NAME': ['@', '\x0F'],
'COLOR': ['#', '\x05\x00'],
}
TEXT_END = '\x02'
def line_wrap(text, strip_existing_lines=False, strip_existing_boxes=False, replace_control_chars=True):
# Replace stand-in characters with their actual control code.
if replace_control_chars:
def replace_bytes(matchobj):
return ''.join(chr(x) for x in bytes.fromhex(matchobj[1]))
for char in CONTROL_CHARS.values():
text = text.replace(char[0], char[1])
text = re.sub(r"\$\{((?:[0-9a-f][0-9a-f] ?)+)}", replace_bytes, text, flags=re.IGNORECASE)
# Parse the text into a list of control codes.
text_codes = Messages.parse_control_codes(text)
# Existing line/box break codes to strip.
strip_codes = []
if strip_existing_boxes:
strip_codes.append(0x04)
if strip_existing_lines:
strip_codes.append(0x01)
# Replace stripped codes with a space.
if strip_codes:
index = 0
while index < len(text_codes):
text_code = text_codes[index]
if text_code.code in strip_codes:
# Check for existing whitespace near this control code.
# If one is found, simply remove this text code.
if index > 0 and text_codes[index-1].code == 0x20:
text_codes.pop(index)
continue
if index + 1 < len(text_codes) and text_codes[index+1].code == 0x20:
text_codes.pop(index)
continue
# Replace this text code with a space.
text_codes[index] = Messages.Text_Code(0x20, 0)
index += 1
# Split the text codes by current box breaks.
boxes = []
start_index = 0
end_index = 0
for text_code in text_codes:
end_index += 1
if text_code.code == 0x04:
boxes.append(text_codes[start_index:end_index])
start_index = end_index
boxes.append(text_codes[start_index:end_index])
# Split the boxes into lines and words.
processed_boxes = []
for box_codes in boxes:
line_width = NORMAL_LINE_WIDTH
icon_code = None
words = []
# Group the text codes into words.
index = 0
while index < len(box_codes):
text_code = box_codes[index]
index += 1
# Check for an icon code and lower the width of this box if one is found.
if text_code.code == 0x13:
line_width = 1441440
icon_code = text_code
# Find us a whole word.
if text_code.code in [0x01, 0x04, 0x20]:
if index > 1:
words.append(box_codes[0:index-1])
if text_code.code in [0x01, 0x04]:
# If we have run into a line or box break, add it as a "word" as well.
words.append([box_codes[index-1]])
box_codes = box_codes[index:]
index = 0
if index > 0 and index == len(box_codes):
words.append(box_codes)
box_codes = []
# Arrange our words into lines.
lines = []
start_index = 0
end_index = 0
box_count = 1
while end_index < len(words):
# Our current confirmed line.
end_index += 1
line = words[start_index:end_index]
# If this word is a line/box break, trim our line back a word and deal with it later.
break_char = False
if words[end_index-1][0].code in [0x01, 0x04]:
line = words[start_index:end_index-1]
break_char = True
# Check the width of the line after adding one more word.
if end_index == len(words) or break_char or calculate_width(words[start_index:end_index+1]) > line_width:
if line or lines:
lines.append(line)
start_index = end_index
# If we've reached the end of the box, finalize it.
if end_index == len(words) or words[end_index-1][0].code == 0x04 or len(lines) == LINES_PER_BOX:
# Append the same icon to any wrapped boxes.
if icon_code and box_count > 1:
lines[0][0] = [icon_code] + lines[0][0]
processed_boxes.append(lines)
lines = []
box_count += 1
# Construct our final string.
# This is a hideous level of list comprehension. Sorry.
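# Reading the comprehension inside-out: codes are joined into words, words are joined
# with spaces into lines, lines are joined with '\x01' (line break) into boxes, and
# boxes are joined with '\x04' (box break) into the final string.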
return '\x04'.join(['\x01'.join([' '.join([''.join([code.get_string() for code in word]) for word in line]) for line in box]) for box in processed_boxes])
def calculate_width(words):
words_width = 0
for word in words:
index = 0
while index < len(word):
character = word[index]
index += 1
if character.code in Messages.CONTROL_CODES:
if character.code == 0x06:
words_width += character.data
words_width += get_character_width(chr(character.code))
spaces_width = get_character_width(' ') * (len(words) - 1)
return words_width + spaces_width
def get_character_width(character):
try:
return character_table[character]
except KeyError:
if ord(character) < 0x20:
if character in control_code_width:
return sum([character_table[c] for c in control_code_width[character]])
else:
return 0
else:
# A sane default with the most common character width
return character_table[' ']
control_code_width = {
'\x0F': '00000000',
'\x16': '00\'00"',
'\x17': '00\'00"',
'\x18': '00000',
'\x19': '100',
'\x1D': '00',
'\x1E': '00000',
'\x1F': '00\'00"',
}
# Tediously measured by filling a full line of a gossip stone's text box with one character until it is reasonably full
# (with a right margin) and counting how many characters fit. OoT does not appear to use any kerning, but, if it does,
# it will only make the characters more space-efficient, so this is an underestimate of the number of letters per line,
# at worst. This ensures that we will never bleed text out of the text box while line wrapping.
# Larger numbers in the denominator mean more of that character fits on a line; conversely, larger values in this table
# mean the character is wider and can't fit as many on one line.
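# Worked example (illustrative): 'a' below is 51480 = NORMAL_LINE_WIDTH / 35, so a line
# fits 35 'a' characters, while 'W' at 100100 = NORMAL_LINE_WIDTH / 18 allows only 18.
# calculate_width() sums these values (plus one space width between words) and
# line_wrap() breaks the line once adding another word would exceed the line width.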
character_table = {
'\x0F': 655200,
'\x16': 292215,
'\x17': 292215,
'\x18': 300300,
'\x19': 145860,
'\x1D': 85800,
'\x1E': 300300,
'\x1F': 265980,
'a': 51480, # LINE_WIDTH / 35
'b': 51480, # LINE_WIDTH / 35
'c': 51480, # LINE_WIDTH / 35
'd': 51480, # LINE_WIDTH / 35
'e': 51480, # LINE_WIDTH / 35
'f': 34650, # LINE_WIDTH / 52
'g': 51480, # LINE_WIDTH / 35
'h': 51480, # LINE_WIDTH / 35
'i': 25740, # LINE_WIDTH / 70
'j': 34650, # LINE_WIDTH / 52
'k': 51480, # LINE_WIDTH / 35
'l': 25740, # LINE_WIDTH / 70
'm': 81900, # LINE_WIDTH / 22
'n': 51480, # LINE_WIDTH / 35
'o': 51480, # LINE_WIDTH / 35
'p': 51480, # LINE_WIDTH / 35
'q': 51480, # LINE_WIDTH / 35
'r': 42900, # LINE_WIDTH / 42
's': 51480, # LINE_WIDTH / 35
't': 42900, # LINE_WIDTH / 42
'u': 51480, # LINE_WIDTH / 35
'v': 51480, # LINE_WIDTH / 35
'w': 81900, # LINE_WIDTH / 22
'x': 51480, # LINE_WIDTH / 35
'y': 51480, # LINE_WIDTH / 35
'z': 51480, # LINE_WIDTH / 35
'A': 81900, # LINE_WIDTH / 22
'B': 51480, # LINE_WIDTH / 35
'C': 72072, # LINE_WIDTH / 25
'D': 72072, # LINE_WIDTH / 25
'E': 51480, # LINE_WIDTH / 35
'F': 51480, # LINE_WIDTH / 35
'G': 81900, # LINE_WIDTH / 22
'H': 60060, # LINE_WIDTH / 30
'I': 25740, # LINE_WIDTH / 70
'J': 51480, # LINE_WIDTH / 35
'K': 60060, # LINE_WIDTH / 30
'L': 51480, # LINE_WIDTH / 35
'M': 81900, # LINE_WIDTH / 22
'N': 72072, # LINE_WIDTH / 25
'O': 81900, # LINE_WIDTH / 22
'P': 51480, # LINE_WIDTH / 35
'Q': 81900, # LINE_WIDTH / 22
'R': 60060, # LINE_WIDTH / 30
'S': 60060, # LINE_WIDTH / 30
'T': 51480, # LINE_WIDTH / 35
'U': 60060, # LINE_WIDTH / 30
'V': 72072, # LINE_WIDTH / 25
'W': 100100, # LINE_WIDTH / 18
'X': 72072, # LINE_WIDTH / 25
'Y': 60060, # LINE_WIDTH / 30
'Z': 60060, # LINE_WIDTH / 30
' ': 51480, # LINE_WIDTH / 35
'1': 25740, # LINE_WIDTH / 70
'2': 51480, # LINE_WIDTH / 35
'3': 51480, # LINE_WIDTH / 35
'4': 60060, # LINE_WIDTH / 30
'5': 51480, # LINE_WIDTH / 35
'6': 51480, # LINE_WIDTH / 35
'7': 51480, # LINE_WIDTH / 35
'8': 51480, # LINE_WIDTH / 35
'9': 51480, # LINE_WIDTH / 35
'0': 60060, # LINE_WIDTH / 30
'!': 51480, # LINE_WIDTH / 35
'?': 72072, # LINE_WIDTH / 25
'\'': 17325, # LINE_WIDTH / 104
'"': 34650, # LINE_WIDTH / 52
'.': 25740, # LINE_WIDTH / 70
',': 25740, # LINE_WIDTH / 70
'/': 51480, # LINE_WIDTH / 35
'-': 34650, # LINE_WIDTH / 52
'_': 51480, # LINE_WIDTH / 35
'(': 42900, # LINE_WIDTH / 42
')': 42900, # LINE_WIDTH / 42
'$': 51480 # LINE_WIDTH / 35
}
# To run tests, enter the following into a python3 REPL:
# >>> import Messages
# >>> from TextBox import line_wrap_tests
# >>> line_wrap_tests()
def line_wrap_tests():
test_wrap_simple_line()
test_honor_forced_line_wraps()
test_honor_box_breaks()
test_honor_control_characters()
test_honor_player_name()
test_maintain_multiple_forced_breaks()
test_trim_whitespace()
test_support_long_words()
def test_wrap_simple_line():
words = 'Hello World! Hello World! Hello World!'
expected = 'Hello World! Hello World! Hello\x01World!'
result = line_wrap(words)
if result != expected:
print('"Wrap Simple Line" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Wrap Simple Line" test passed!')
def test_honor_forced_line_wraps():
words = 'Hello World! Hello World!&Hello World! Hello World! Hello World!'
expected = 'Hello World! Hello World!\x01Hello World! Hello World! Hello\x01World!'
result = line_wrap(words)
if result != expected:
print('"Honor Forced Line Wraps" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Honor Forced Line Wraps" test passed!')
def test_honor_box_breaks():
words = 'Hello World! Hello World!^Hello World! Hello World! Hello World!'
expected = 'Hello World! Hello World!\x04Hello World! Hello World! Hello\x01World!'
result = line_wrap(words)
if result != expected:
print('"Honor Box Breaks" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Honor Box Breaks" test passed!')
def test_honor_control_characters():
words = 'Hello World! #Hello# World! Hello World!'
expected = 'Hello World! \x05\x00Hello\x05\x00 World! Hello\x01World!'
result = line_wrap(words)
if result != expected:
print('"Honor Control Characters" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Honor Control Characters" test passed!')
def test_honor_player_name():
words = 'Hello @! Hello World! Hello World!'
expected = 'Hello \x0F! Hello World!\x01Hello World!'
result = line_wrap(words)
if result != expected:
print('"Honor Player Name" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Honor Player Name" test passed!')
def test_maintain_multiple_forced_breaks():
words = 'Hello World!&&&Hello World!'
expected = 'Hello World!\x01\x01\x01Hello World!'
result = line_wrap(words)
if result != expected:
print('"Maintain Multiple Forced Breaks" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Maintain Multiple Forced Breaks" test passed!')
def test_trim_whitespace():
words = 'Hello World! & Hello World!'
expected = 'Hello World!\x01Hello World!'
result = line_wrap(words)
if result != expected:
print('"Trim Whitespace" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Trim Whitespace" test passed!')
def test_support_long_words():
words = 'Hello World! WWWWWWWWWWWWWWWWWWWW Hello World!'
expected = 'Hello World!\x01WWWWWWWWWWWWWWWWWWWW\x01Hello World!'
result = line_wrap(words)
if result != expected:
print('"Support Long Words" test failed: Got ' + result + ', wanted ' + expected)
else:
print('"Support Long Words" test passed!')
|
the-stack_106_30360 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from niftynet.layer.additive_upsample import ResidualUpsampleLayer
from tests.niftynet_testcase import NiftyNetTestCase
def get_3d_input():
input_shape = (2, 16, 16, 16, 4)
x = tf.ones(input_shape)
return x
def get_2d_input():
input_shape = (2, 16, 16, 4)
x = tf.ones(input_shape)
return x
class ResidualUpsampleTest(NiftyNetTestCase):
def run_test(self, param_dict, expected_shape, is_3d=True):
if is_3d:
x = get_3d_input()
else:
x = get_2d_input()
upsample_layer = ResidualUpsampleLayer(**param_dict)
resized = upsample_layer(x)
print(upsample_layer)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(resized)
self.assertAllClose(out.shape, expected_shape)
def test_3d_shape(self):
params = {'kernel_size': 3, 'stride': 2, 'n_splits': 2}
expected_shape = (2, 32, 32, 32, 2)
self.run_test(params, expected_shape, True)
params = {'kernel_size': 2, 'stride': 3, 'n_splits': 4}
expected_shape = (2, 48, 48, 48, 1)
self.run_test(params, expected_shape, True)
params = {'kernel_size': 2, 'stride': 3, 'n_splits': 1,
'acti_func': 'prelu'}
expected_shape = (2, 48, 48, 48, 4)
self.run_test(params, expected_shape, True)
params = {'kernel_size': 2, 'stride': (3, 2, 3), 'n_splits': 1,
'acti_func': 'prelu'}
expected_shape = (2, 48, 32, 48, 4)
self.run_test(params, expected_shape, True)
def test_2d_shape(self):
params = {'kernel_size': 3, 'stride': 2, 'n_splits': 2}
expected_shape = (2, 32, 32, 2)
self.run_test(params, expected_shape, False)
params = {'kernel_size': 2, 'stride': 3, 'n_splits': 4}
expected_shape = (2, 48, 48, 1)
self.run_test(params, expected_shape, False)
params = {'kernel_size': 2, 'stride': 3, 'n_splits': 1,
'acti_func': 'prelu'}
expected_shape = (2, 48, 48, 4)
self.run_test(params, expected_shape, False)
params = {'kernel_size': 2, 'stride': (3, 2), 'n_splits': 1,
'acti_func': 'prelu'}
expected_shape = (2, 48, 32, 4)
self.run_test(params, expected_shape, False)
def test_float_params(self):
params = {'kernel_size': 2.1, 'stride': 3, 'n_splits': 1.1,
'acti_func': 'prelu'}
expected_shape = (2, 48, 48, 4)
self.run_test(params, expected_shape, False)
def test_bad_int_shape(self):
params = {'kernel_size': 2, 'stride': 3, 'n_splits': 3,
'acti_func': 'prelu'}
with self.assertRaisesRegexp(AssertionError, ""):
self.run_test(params, (None,) * 2, False)
with self.assertRaisesRegexp(AssertionError, ""):
self.run_test(params, (None,) * 3, True)
if __name__ == "__main__":
tf.test.main()
|
the-stack_106_30361 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('front', '0006_auto_20150106_2347'),
]
operations = [
migrations.AlterField(
model_name='website',
name='pw_max_length',
field=models.SmallIntegerField(choices=[(0, '<16'), (1, '<10'), (2, 'Unlimitiert'), (3, '>=16')]),
preserve_default=True,
),
]
|
the-stack_106_30362 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit tests for FSDTMLMethod module.
"""
import unittest
import Testing
from os.path import join as path_join
from Acquisition import aq_base
from App.Common import rfc1123_date
from DateTime import DateTime
from OFS.Folder import Folder
from Products.StandardCacheManagers import RAMCacheManager
from zope.component import getSiteManager
from zope.component.hooks import setHooks
from zope.testing.cleanup import cleanUp
from Products.CMFCore.FSMetadata import FSMetadata
from Products.CMFCore.interfaces import ICachingPolicyManager
from Products.CMFCore.tests.base.dummy import DummyCachingManager
from Products.CMFCore.tests.base.dummy import DummyCachingManagerWithPolicy
from Products.CMFCore.tests.base.dummy import DummyContent
from Products.CMFCore.tests.base.testcase import FSDVTest
from Products.CMFCore.tests.base.testcase import SecurityTest
from Products.CMFCore.tests.base.testcase import TransactionalTest
class FSDTMLMaker(FSDVTest):
def _makeOne(self, id, filename):
from Products.CMFCore.FSDTMLMethod import FSDTMLMethod
path = path_join(self.skin_path_name, filename)
metadata = FSMetadata(path)
metadata.read()
return FSDTMLMethod(id, path, properties=metadata.getProperties())
class FSDTMLMethodTests(TransactionalTest, FSDTMLMaker):
def setUp(self):
TransactionalTest.setUp(self)
FSDTMLMaker.setUp(self)
setHooks()
def tearDown(self):
cleanUp()
FSDTMLMaker.tearDown(self)
TransactionalTest.tearDown(self)
def test___call__(self):
script = self._makeOne('testDTML', 'testDTML.dtml')
script = script.__of__(self.app)
self.assertEqual(script(self.app, self.REQUEST), 'nohost\n')
def test_caching(self):
# Test HTTP caching headers.
cpm = DummyCachingManager()
getSiteManager().registerUtility(cpm, ICachingPolicyManager)
original_len = len(self.RESPONSE.headers)
obj = self._makeOne('testDTML', 'testDTML.dtml')
obj = obj.__of__(self.app)
obj(self.app, self.REQUEST, self.RESPONSE)
self.assertTrue(len(self.RESPONSE.headers) >= original_len + 2)
self.assertTrue('foo' in self.RESPONSE.headers.keys())
self.assertTrue('bar' in self.RESPONSE.headers.keys())
def test_ownership(self):
script = self._makeOne('testDTML', 'testDTML.dtml')
script = script.__of__(self.app)
# fsdtmlmethod has no owner
owner_tuple = script.getOwnerTuple()
self.assertEqual(owner_tuple, None)
# and ownership is not acquired [CMF/450]
self.app._owner = ('/foobar', 'baz')
owner_tuple = script.getOwnerTuple()
self.assertEqual(owner_tuple, None)
def test_304_response_from_cpm(self):
# test that we get a 304 response from the cpm via this template
mod_time = DateTime()
cpm = DummyCachingManagerWithPolicy()
getSiteManager().registerUtility(cpm, ICachingPolicyManager)
content = DummyContent(id='content')
content.modified_date = mod_time
content = content.__of__(self.app)
script = self._makeOne('testDTML', 'testDTML.dtml')
script = script.__of__(content)
self.REQUEST.environ['IF_MODIFIED_SINCE'
] = '%s;' % rfc1123_date(mod_time + 3600)
data = script(content, self.REQUEST, self.RESPONSE)
self.assertEqual(data, '')
self.assertEqual(self.RESPONSE.getStatus(), 304)
class FSDTMLMethodCustomizationTests(SecurityTest, FSDTMLMaker):
def setUp(self):
FSDTMLMaker.setUp(self)
SecurityTest.setUp(self)
self.skins, self.custom, self.fsdir, self.fsDTML = self._makeContext(
'testDTML', 'testDTML.dtml')
def tearDown(self):
cleanUp()
SecurityTest.tearDown(self)
FSDTMLMaker.tearDown(self)
def test_customize(self):
self.fsDTML.manage_doCustomize(folder_path='custom')
self.assertEqual(len(self.custom.objectIds()), 1)
self.assertTrue('testDTML' in self.custom.objectIds())
def test_customize_alternate_root(self):
self.app.other = Folder('other')
self.fsDTML.manage_doCustomize(folder_path='other', root=self.app)
self.assertFalse('testDTML' in self.custom.objectIds())
self.assertTrue('testDTML' in self.app.other.objectIds())
def test_customize_fspath_as_dot(self):
self.fsDTML.manage_doCustomize(folder_path='.')
self.assertFalse('testDTML' in self.custom.objectIds())
self.assertTrue('testDTML' in self.skins.objectIds())
def test_customize_manual_clone(self):
clone = Folder('testDTML')
self.fsDTML.manage_doCustomize(folder_path='custom', obj=clone)
self.assertTrue('testDTML' in self.custom.objectIds())
self.assertTrue(aq_base(self.custom._getOb('testDTML')) is clone)
def test_customize_caching(self):
# Test to ensure that cache manager associations survive customizing
cache_id = 'gofast'
RAMCacheManager.manage_addRAMCacheManager(self.app, cache_id,
REQUEST=None)
self.fsDTML.ZCacheable_setManagerId(cache_id, REQUEST=None)
self.assertEqual(self.fsDTML.ZCacheable_getManagerId(), cache_id)
self.fsDTML.manage_doCustomize(folder_path='custom')
custom_pt = self.custom.testDTML
self.assertEqual(custom_pt.ZCacheable_getManagerId(), cache_id)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(FSDTMLMethodTests),
unittest.makeSuite(FSDTMLMethodCustomizationTests),
))
|
the-stack_106_30364 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import os
import sys
import json
import subprocess
from multiprocessing.connection import Client
# To simulate certbot DNS hooks:
# CERTBOT_DOMAIN=yourdomain.net CERTBOT_VALIDATION=xxx python3 certbottxt.py deploy
# CERTBOT_DOMAIN=yourdomain.net CERTBOT_VALIDATION=xxx CERTBOT_AUTH_OUTPUT=_acme-challenge.asdf.com python3 certbottxt.py cleanup
BASE_PATH=os.path.realpath(__file__)
CERTBOT_DOMAIN=os.getenv('CERTBOT_DOMAIN')
CERTBOT_VALIDATION=os.getenv('CERTBOT_VALIDATION')
address = ('localhost', 6000)
def help():
print("Command: renovate [domain] [email]\n")
if len(sys.argv) == 1:
help()
elif sys.argv[1] == 'deploy':
DOMAIN="_acme-challenge.%s" % CERTBOT_DOMAIN
conn = Client(address, authkey=b'secret')
conn.send(json.dumps({'command': 'ADDTXT', 'key': DOMAIN, 'val': CERTBOT_VALIDATION}, ensure_ascii=False, indent=4))
print(DOMAIN)
conn.close()
elif sys.argv[1] == 'cleanup':
CERTBOT_AUTH_OUTPUT=os.getenv('CERTBOT_AUTH_OUTPUT', '*')
conn = Client(address, authkey=b'secret')
conn.send(json.dumps({'command': 'REMOVETXT', 'key': CERTBOT_AUTH_OUTPUT}, ensure_ascii=False, indent=4))
conn.close()
elif sys.argv[1] == 'wildcard' or sys.argv[1] == 'naked':
if len(sys.argv) != 4:
help()
else:
script = os.path.abspath(__file__)
basename = sys.argv[2] + '-' + sys.argv[1]
command = [
'certbot', 'certonly', '--noninteractive', # TEST: '--test-cert',
'--agree-tos', '--email', sys.argv[3],
'--manual', '--preferred-challenges=dns', '--manual-public-ip-logging-ok',
'--manual-auth-hook', 'python3 {0} deploy'.format(script),
'--manual-cleanup-hook', 'python3 {0} cleanup'.format(script),
'-d', ('*.' if sys.argv[1] == 'wildcard' else '') + sys.argv[2]
]
output = subprocess.run(command)
print(output.stdout)
print(output.stderr)
|
the-stack_106_30365 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################################################################
import json, logging, os
import boto3
import traceback
from urllib.request import Request, urlopen
from time import sleep
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(
format='%(levelname)s %(threadName)s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO
)
try:
logger.info("Container initialization completed")
except Exception as e:
logger.error(e, exc_info=True)
init_failed = e
############################################################
# ASSOCIATION FUNCTIONS #
############################################################
def get_account_email(root_org_client, root_role, spoke_id):
"""
Retrieves the email address associated to the Spoke account
:param root_org_client: The Organizations client for the Root account
:param root_role: The role in the Org master to assume into
:param spoke_id: The account number for the spoke
:return: The email address associated to the spoke account
"""
try:
account_info = root_org_client.describe_account(AccountId=spoke_id)
email = account_info['Account']['Email']
logger.info("Email found for account {}: {}".format(spoke_id, email))
return email
except Exception as e:
logger.error("Unable to find email for account: {}".format(e))
raise
def find_create_detector_spoke():
"""
Finds existing detectors in the spoke or creates one if it does not already exist
:return: The ID of the detector in the spoke account
"""
spoke_guardduty = boto3.client('guardduty')
try:
logger.info('Finding Spoke Detector ID...')
list_spoke_detectors = spoke_guardduty.list_detectors()
spoke_detector = list_spoke_detectors['DetectorIds']
if not spoke_detector:
logger.info('Detector not found, creating one...')
create_spoke_detector = spoke_guardduty.create_detector(
Enable=True,
FindingPublishingFrequency='FIFTEEN_MINUTES'
)
spoke_detector_id = create_spoke_detector['DetectorId']
logger.info("Created detector with ID {}".format(spoke_detector_id))
return spoke_detector_id
elif len(spoke_detector) > 1:
logger.error("Too many detectors found! List of detectors: {}".format(spoke_detector))
raise ValueError("Too many detectors")
else:
spoke_detector_id = spoke_detector[0]
logger.info("Detector already exists: {}".format(spoke_detector_id))
return spoke_detector_id
except Exception as e:
logger.error('Unable to find/create the detector: {}'.format(e))
raise
def find_create_detector_hub(hub_gd):
"""
Finds existing detectors in the hub or creates one if it does not already exist
:param hub_gd: The GuardDuty client for the Hub account
:return: The ID of the detector found in the Hub account
"""
logger.info('Finding Hub detector ID...')
try:
list_hub_detectors = hub_gd.list_detectors()
hub_detector = list_hub_detectors['DetectorIds']
if not hub_detector:
logger.info('Detector not found, creating one...')
create_hub_detector = hub_gd.create_detector(
Enable=True,
FindingPublishingFrequency='FIFTEEN_MINUTES'
)
hub_detector = create_hub_detector['DetectorId']
logger.info("Created detector of ID {} in the Hub account".format(hub_detector))
return hub_detector
elif len(hub_detector) > 1:
logger.error("Too many detectors found! List of detectors: {}".format(hub_detector))
raise ValueError("Too many detectors")
else:
logger.info("Detector found with ID {}".format(hub_detector[0]))
return hub_detector[0]
except Exception as e:
logger.error('Unable to find/create the detector: {}'.format(e))
raise
def create_member_in_hub(hub_gd, hub_detect_id, spoke_id, spoke_email):
"""
Creates member accounts of the Hub account
:param hub_gd: The GuardDuty client for the Hub account
:param hub_detect_id: The ID for the GuardDuty detector in the Hub account
:param spoke_id: The ID of the spoke account
:param spoke_email: The email associated to the Spoke account
"""
try:
logger.info('Attempting to create member')
hub_gd.create_members(
AccountDetails=[
{
'AccountId': spoke_id,
'Email': spoke_email
}
],
DetectorId=hub_detect_id)
return
except Exception as e:
logger.error("Unable to create members for GuardDuty in the Hub account: {}".format(e))
raise
def invite_detector_from_hub(hub_gd, hub_detector, spoke_detector, spoke_id, hub_id):
"""
Invites spoke detectors to join GuardDuty from the Hub account
:param hub_gd: The GuardDuty client for the Hub account
:param hub_detector: The ID for the GuardDuty detector in the Hub account
:param spoke_detector: The ID for the GuardDuty detector in the Spoke account
:param spoke_id: The ID of the spoke account
:param hub_id: The ID of the Hub account
:return: Response status
"""
try:
logger.info('Checking whether the Spoke Detector Id is the same as the Hub DetectorId')
if hub_detector != spoke_detector:
logger.info("Attempting to invite spoke account {} from detector {} in Hub account".format(spoke_id, hub_detector))
hub_gd.invite_members(
AccountIds=[
spoke_id,
],
DetectorId=hub_detector,
DisableEmailNotification=False,
Message="Account {} has been invited to join GuardDuty in the Hub Account ({})".format(spoke_id, hub_id))
return accept_invitation_from_spoke(hub_id, spoke_detector)
else:
logger.info('No action needed as Spoke is the Hub')
return 'SUCCESS'
except Exception as e:
logger.error("Unable to invite the detector from the Hub: {}".format(e))
raise e
def accept_invitation_from_spoke(hub_acc_id, spoke_detector_id):
"""
Accepts the GuardDuty invitation from the Hub account inside the Spoke account
:param hub_acc_id: The ID of the Hub account that sent the invitation
:param spoke_detector_id: The ID for the GuardDuty detector in the Spoke account
:return: 'SUCCESS' if the invitation was accepted, 'FAILED' otherwise
"""
guardduty = boto3.client('guardduty')
try:
logger.info('Searching for Invitation in Spoke account...')
invite_list = {'Invitations': []}
# Guard against attempting to accept invitations too early
# Willing to wait up to 90 seconds for invitations to appear
break_counter = 0
while not invite_list['Invitations'] and break_counter < 18:
sleep(5)
break_counter += 1
invite_list = guardduty.list_invitations()
# If list is empty at the end of 1 minute
if not invite_list['Invitations']:
logger.error('No invitations found')
return 'FAILED'
for invite in invite_list['Invitations']:
if invite['AccountId'] == hub_acc_id:
logger.info('Invitation from Hub account found: {}'.format(invite))
invitation_id = invite['InvitationId']
guardduty.accept_invitation(DetectorId=spoke_detector_id, InvitationId=invitation_id, MasterId=hub_acc_id)
logger.info('Account added to hub account')
return 'SUCCESS'
logger.error('No match found')
return 'FAILED'
except Exception as e:
logger.error('Could not accept invitations: {}'.format(e))
raise
finally:
logger.info('FINISHED')
############################################################
# PRIMARY FUNCTIONS #
############################################################
def disassociate_guardduty(hub_account_id):
spoke_guardduty = boto3.client('guardduty')
try:
logger.info('Finding Spoke Detector ID')
listSpokeDetectors = spoke_guardduty.list_detectors()
spoke_detector = listSpokeDetectors['DetectorIds']
if not spoke_detector:
logger.error('Detector Not Found')
return "FAILED"
else:
logger.info("Detector found: {}".format(spoke_detector))
for detector_id in spoke_detector:
spoke_guardduty.disassociate_from_master_account(DetectorId=detector_id)
invite_list = spoke_guardduty.list_invitations()
for invite in invite_list['Invitations']:
if invite['AccountId'] == hub_account_id:
invitation_id = invite['InvitationId']
logger.info('Invitation found: {}'.format(invitation_id))
spoke_guardduty.delete_invitations(AccountIds=[hub_account_id])
return "SUCCESS"
else:
logger.error('No invitations found')
return "FAILED"
except Exception as e:
logger.error('The request is rejected: {}'.format(e))
raise
def associate_guardduty(spoke_account_id, root_org_role, hub_guardduty_role, hub_account_id):
sts = boto3.client('sts')
organizations = sts.assume_role(RoleArn=root_org_role, RoleSessionName='OrganizationsSearch')
root_of_org_credentials = organizations['Credentials']
root_of_org_organizations_client = boto3.client(
'organizations',
aws_access_key_id=root_of_org_credentials['AccessKeyId'],
aws_secret_access_key=root_of_org_credentials['SecretAccessKey'],
aws_session_token=root_of_org_credentials['SessionToken'],
)
assume_hub_account = sts.assume_role(RoleArn=hub_guardduty_role, RoleSessionName='SpokeGuardDuty')
hub_credentials = assume_hub_account['Credentials']
hub_guardduty = boto3.client(
'guardduty',
aws_access_key_id=hub_credentials['AccessKeyId'],
aws_secret_access_key=hub_credentials['SecretAccessKey'],
aws_session_token=hub_credentials['SessionToken'],
)
hub_detector_id = find_create_detector_hub(hub_guardduty)
spoke_detector_id = find_create_detector_spoke()
logger.info('Searching Organization for Spoke email...')
spoke_email = get_account_email(root_of_org_organizations_client, root_org_role, spoke_account_id)
create_member_in_hub(hub_guardduty, hub_detector_id, spoke_account_id, spoke_email)
return invite_detector_from_hub(hub_guardduty, hub_detector_id, spoke_detector_id, spoke_account_id, hub_account_id)
############################################################
# HELPER FUNCTION #
############################################################
def send_response(e, c, rs, rd):
"""
Packages response and send signals to CloudFormation
:param e: The event given to this Lambda function
:param c: Context object, as above
:param rs: Returned status to be sent back to CFN
:param rd: Returned data to be sent back to CFN
"""
logger.info("Sending response: {}".format(rs))
r = json.dumps({
"Status": rs,
"Reason": "CloudWatch Log Stream: " + c.log_stream_name,
"PhysicalResourceId": c.log_stream_name,
"StackId": e['StackId'],
"RequestId": e['RequestId'],
"LogicalResourceId": e['LogicalResourceId'],
"Data": rd
})
d = str.encode(r)
h = {
'content-type': '',
'content-length': str(len(d))
}
req = Request(e['ResponseURL'], data=d, method='PUT', headers=h)
r = urlopen(req)
logger.info("Status message: {} {}".format(r.msg, r.getcode()))
############################################################
# LAMBDA FUNCTION HANDLER #
############################################################
# IMPORTANT: The Lambda function will be called whenever #
# changes are made to the stack. Thus, ensure that the #
# signals are handled by your Lambda function correctly, #
# or the stack could get stuck in the DELETE_FAILED state #
############################################################
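# Sketch of the CloudFormation custom-resource event this handler expects (values are
# illustrative placeholders; only the key names are taken from the code below):
# {
#   "RequestType": "Create" | "Update" | "Delete",
#   "ResponseURL": "<pre-signed URL that send_response PUTs the result to>",
#   "StackId": "...", "RequestId": "...", "LogicalResourceId": "...",
#   "ResourceProperties": {
#     "HubAccountId": "...", "SpokeAccountID": "...",
#     "GuardDutyAssumableOrgRoleArn": "...", "AssumableHubRoleArn": "..."
#   }
# }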
def handler(event, context):
"""
Entrypoint to Lambda
:param event: event passed to the Lambda handler from CloudFormation
:param context: contains information about the Lambda function
"""
request_type = event['RequestType']
logger.info("Received an event of type {} from CloudFormation".format(request_type))
hub_account_id = event['ResourceProperties']['HubAccountId']
try:
if request_type == 'Create':
spoke_account_id = event['ResourceProperties']['SpokeAccountID']
root_org_role = event['ResourceProperties']['GuardDutyAssumableOrgRoleArn']
hub_guardduty_role = event['ResourceProperties']['AssumableHubRoleArn']
response_status = associate_guardduty(spoke_account_id, root_org_role, hub_guardduty_role, hub_account_id)
send_response(event, context, response_status, {"Message": "Created"})
elif request_type == 'Delete':
response_status = disassociate_guardduty(hub_account_id)
send_response(event, context, response_status, {"Message": "Deleted"})
elif request_type == 'Update':
logger.info('Update Requests are not supported: Delete Product and redeploy to make changes')
send_response(event, context, "FAILED", {"Message": "Unsupported"})
except Exception as ex:
logger.error(ex)
traceback.print_tb(ex.__traceback__)
send_response(
event,
context,
"FAILED",
{
"Message": "Exception"
}
)
|
the-stack_106_30366 | import sqlite3
from datetime import datetime
from os import listdir
import os
import re
import json
import shutil
import pandas as pd
from application_logging.logger import App_Logger
class Prediction_Data_validation:
"""
This class shall be used for handling all the validation done on the Raw Prediction Data!!.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self,path):
self.Batch_Directory = path
self.schema_path = 'schema_prediction.json'
self.logger = App_Logger()
def valuesFromSchema(self):
"""
Method Name: valuesFromSchema
Description: This method extracts all the relevant information from the pre-defined "Schema" file.
Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns
On Failure: Raise ValueError,KeyError,Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
with open(self.schema_path, 'r') as f:
dic = json.load(f)
f.close()
pattern = dic['SampleFileName']
LengthOfDateStampInFile = dic['LengthOfDateStampInFile']
LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']
column_names = dic['ColName']
NumberofColumns = dic['NumberofColumns']
file = open("Training_Logs/valuesfromSchemaValidationLog.txt", 'a+')
message ="LengthOfDateStampInFile:: %s" %LengthOfDateStampInFile + "\t" + "LengthOfTimeStampInFile:: %s" % LengthOfTimeStampInFile +"\t " + "NumberofColumns:: %s" % NumberofColumns + "\n"
self.logger.log(file,message)
file.close()
except ValueError:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file,"ValueError:Value not found inside schema_training.json")
file.close()
raise ValueError
except KeyError:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, "KeyError:Key value error incorrect key passed")
file.close()
raise KeyError
except Exception as e:
file = open("Prediction_Logs/valuesfromSchemaValidationLog.txt", 'a+')
self.logger.log(file, str(e))
file.close()
raise e
return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns
def manualRegexCreation(self):
"""
Method Name: manualRegexCreation
Description: This method contains a manually defined regex based on the "FileName" given in "Schema" file.
This Regex is used to validate the filename of the prediction data.
Output: Regex pattern
On Failure: None
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
regex = "['fraudDetection']+['\_'']+[\d_]+[\d]+\.csv"
return regex
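# Illustrative examples (file names invented for demonstration): a name such as
# 'fraudDetection_28011960_120210.csv' matches this pattern, while something like
# 'fraud_Detection_sample.csv' does not and would be routed to the Bad Raw folder
# by validationFileNameRaw below.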
def createDirectoryForGoodBadRawData(self):
"""
Method Name: createDirectoryForGoodBadRawData
Description: This method creates directories to store the Good Data and Bad Data
after validating the prediction data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = os.path.join("Prediction_Raw_Files_Validated/", "Good_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
path = os.path.join("Prediction_Raw_Files_Validated/", "Bad_Raw/")
if not os.path.isdir(path):
os.makedirs(path)
except OSError as ex:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while creating Directory %s:" % ex)
file.close()
raise OSError
def deleteExistingGoodDataTrainingFolder(self):
"""
Method Name: deleteExistingGoodDataTrainingFolder
Description: This method deletes the directory made to store the Good Data
after loading the data in the table. Once the good files are
loaded in the DB,deleting the directory ensures space optimization.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Prediction_Raw_Files_Validated/'
# if os.path.isdir("ids/" + userName):
# if os.path.isdir(path + 'Bad_Raw/'):
# shutil.rmtree(path + 'Bad_Raw/')
if os.path.isdir(path + 'Good_Raw/'):
shutil.rmtree(path + 'Good_Raw/')
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"GoodRaw directory deleted successfully!!!")
file.close()
except OSError as s:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def deleteExistingBadDataTrainingFolder(self):
"""
Method Name: deleteExistingBadDataTrainingFolder
Description: This method deletes the directory made to store the bad Data.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
path = 'Prediction_Raw_Files_Validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"BadRaw directory deleted before starting validation!!!")
file.close()
except OSError as s:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Error while Deleting Directory : %s" %s)
file.close()
raise OSError
def moveBadFilesToArchiveBad(self):
"""
Method Name: moveBadFilesToArchiveBad
Description: This method deletes the directory made to store the Bad Data
after moving the data in an archive folder. We archive the bad
files to send them back to the client for invalid data issue.
Output: None
On Failure: OSError
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
now = datetime.now()
date = now.date()
time = now.strftime("%H%M%S")
try:
path= "PredictionArchivedBadData"
if not os.path.isdir(path):
os.makedirs(path)
source = 'Prediction_Raw_Files_Validated/Bad_Raw/'
dest = 'PredictionArchivedBadData/BadData_' + str(date)+"_"+str(time)
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(source)
for f in files:
if f not in os.listdir(dest):
shutil.move(source + f, dest)
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file,"Bad files moved to archive")
path = 'Prediction_Raw_Files_Validated/'
if os.path.isdir(path + 'Bad_Raw/'):
shutil.rmtree(path + 'Bad_Raw/')
self.logger.log(file,"Bad Raw Data Folder Deleted successfully!!")
file.close()
except OSError as e:
file = open("Prediction_Logs/GeneralLog.txt", 'a+')
self.logger.log(file, "Error while moving bad files to archive:: %s" % e)
file.close()
raise OSError
def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):
"""
Method Name: validationFileNameRaw
Description: This function validates the name of the prediction csv file as per given name in the schema!
Regex pattern is used to do the validation.If name format do not match the file is moved
to Bad Raw Data folder else in Good raw data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
# delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.
self.deleteExistingBadDataTrainingFolder()
self.deleteExistingGoodDataTrainingFolder()
self.createDirectoryForGoodBadRawData()
onlyfiles = [f for f in listdir(self.Batch_Directory)]
try:
f = open("Prediction_Logs/nameValidationLog.txt", 'a+')
for filename in onlyfiles:
if (re.match(regex, filename)):
splitAtDot = re.split('.csv', filename)
splitAtDot = (re.split('_', splitAtDot[0]))
if len(splitAtDot[1]) == LengthOfDateStampInFile:
if len(splitAtDot[2]) == LengthOfTimeStampInFile:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Good_Raw")
self.logger.log(f,"Valid File name!! File moved to GoodRaw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
else:
shutil.copy("Prediction_Batch_files/" + filename, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f, "Invalid File Name!! File moved to Bad Raw Folder :: %s" % filename)
f.close()
except Exception as e:
f = open("Prediction_Logs/nameValidationLog.txt", 'a+')
self.logger.log(f, "Error occured while validating FileName %s" % e)
f.close()
raise e
def validateColumnLength(self,NumberofColumns):
"""
Method Name: validateColumnLength
Description: This function validates the number of columns in the csv files.
It should be the same as given in the schema file.
If not, the file is not suitable for processing and is moved to the Bad Raw Data folder.
If the column count matches, the file is kept in Good Raw Data for processing.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f,"Column Length Validation Started!!")
for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):
csv = pd.read_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file)
if csv.shape[1] == NumberofColumns:
csv.to_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file, index=None, header=True)
else:
shutil.move("Prediction_Raw_Files_Validated/Good_Raw/" + file, "Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f, "Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
self.logger.log(f, "Column Length Validation Completed!!")
except OSError:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Prediction_Logs/columnValidationLog.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
def deletePredictionFile(self):
if os.path.exists('Prediction_Output_File/Predictions.csv'):
os.remove('Prediction_Output_File/Predictions.csv')
def validateMissingValuesInWholeColumn(self):
"""
Method Name: validateMissingValuesInWholeColumn
Description: This function validates if any column in the csv file has all values missing.
If all the values are missing, the file is not suitable for processing.
Such files are moved to bad raw data.
Output: None
On Failure: Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
try:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Missing Values Validation Started!!")
for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):
csv = pd.read_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file)
count = 0
for columns in csv:
if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):
count+=1
shutil.move("Prediction_Raw_Files_Validated/Good_Raw/" + file,
"Prediction_Raw_Files_Validated/Bad_Raw")
self.logger.log(f,"Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s" % file)
break
if count==0:
csv.to_csv("Prediction_Raw_Files_Validated/Good_Raw/" + file, index=None, header=True)
except OSError:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured while moving the file :: %s" % OSError)
f.close()
raise OSError
except Exception as e:
f = open("Prediction_Logs/missingValuesInColumn.txt", 'a+')
self.logger.log(f, "Error Occured:: %s" % e)
f.close()
raise e
f.close()
|
the-stack_106_30367 | # -*- coding: utf-8 -*-
"""
Module: ClusterGenerator
"""
import numpy as np
from pprint import pprint
from scipy.integrate import dblquad
"""Useful Snippets"""
def RMatrix(theta):
"""
Needed for the mathematical definitions used below.
Builds the 2D rotation matrix for the angle ``theta`` (in radians).
"""
# theta = np.radians(30)
c, s = np.cos(theta), np.sin(theta)
return np.array(((c, -s), (s, c)))
def PinMatrix(matrix, right=True, lower=True):
"""
Pads the matrix with one extra column of False (on the right if ``right`` else on the left) and one extra row of False (at the bottom if ``lower`` else at the top), shifting it inside a one-cell-larger frame.
"""
rl = [matrix, np.zeros((matrix.shape[0],1))==1]
if not right:
rl.reverse()
matrix_temp = np.hstack(rl)
ud = [matrix_temp, np.zeros((1, matrix_temp.shape[1]))==1]
if not lower:
ud.reverse()
matrix_final = np.vstack(ud)
return matrix_final
def PinMatrixTF(matrix, right=True, lower=True):
"""
Same as ``PinMatrix``, but the padded result is returned as a boolean mask (compared ``== True``).
"""
return PinMatrix(matrix, right, lower) == True
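# Illustrative example (assuming a 2x2 input): PinMatrix(np.array([[1, 2], [3, 4]]))
# with the default right=True, lower=True appends a False column on the right and a
# False row at the bottom, giving a 3x3 array; PinMatrixTF returns the same layout as
# a boolean mask. The Digitizer below uses these to offset pixel-boundary matrices.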
"""# 시그널 클래스 정의
시그널 클래스 (Signal) 가 정의된 이후에 (아날로그 시그널임), ``Signal`` 클래스를 상속한 클래스만이 이후에 시그널로 쓰일 수 있음. (이외에는 전부 AssertionError 로 Assert)
"""
class Signal:
"""
Base class used to define a signal.
Attributes
----------
None
"""
def GetPDF(self, X,Y):
"""
Generates the Probability Density Distribution of the signal.
Since this is the pure base class, it returns zero.
Parameters
----------
X : int or float or numpy.ndarray
Y : int or float or numpy.ndarray
Returns
-------
0 or numpy.zeros(X.shape)
"""
if isinstance(X, np.ndarray) and isinstance(Y, np.ndarray):
if X.size == Y.size:
print("The signal is not defined. (this is pure class) Returning zero signal for all")
return np.zeros(X.shape)
return 0
def GetSignal(self, X, Y, amplitude=100):
"""
Computes the signal value at a specific position / set of positions.
Since this is the pure base class, it returns zero.
Parameters
----------
X : int or float or numpy.ndarray
Y : int or float or numpy.ndarray
Returns
-------
numpy.zeros(X.shape)
"""
return np.zeros(X.shape)
class GaussianSignal(Signal):
"""
Signal based on a 2D Gaussian distribution.
It must be defined by either ``S`` or ``(sx,sy,sdeg)`` or ``(sx,sy,srad)``.
If all values are supplied, they are applied with the following priority:
1. ``S``, if given
2. ``(sx,sy,sdeg)``, if ``sdeg`` is given
3. ``(sx,sy,srad)``, if ``srad`` is given
Parameters
----------
x0 : int or float
x value of the center position
y0 : int or float
y value of the center position
amplitude : int or float
Signal multiplier (total integral) relative to the reference Gaussian distribution
S : numpy.ndarray :: shape(2,2) dtype=int or float , optional
Dispersion matrix of the 2D Gaussian
sx : int or float , optional
Dispersion of the 2D Gaussian along the x direction
sy : int or float , optional
Dispersion of the 2D Gaussian along the y direction
sdeg : int or float , optional
Rotation angle (Degree) of the (sx, sy) dispersion axes relative to the x axis
srad : int or float , optional
Rotation angle (Radian) of the (sx, sy) dispersion axes relative to the x axis
Attributes
----------
x0 : int or float
x value of the center position
y0 : int or float
y value of the center position
amplitude : int or float
Signal multiplier (total integral) relative to the reference Gaussian distribution
S : numpy.ndarray :: shape=(2,2) det=1
Dispersion matrix of the distribution
"""
def __init__(self, x0=0, y0=0, amplitude=100, S=None, sx=1, sy=1, sdeg=None, srad=None):
self.x0 = x0
self.y0 = y0
self.amplitude = amplitude
if S is not None:
self.S = S
elif sx is not None and sy is not None:
if srad is None and sdeg is None:
self.S = self.GetDispersion(sx, sy, 0)
elif srad is None and sdeg is not None:
self.S = self.GetDispersion(sx, sy, np.radians(sdeg))
elif srad is not None:
self.S = self.GetDispersion(sx, sy, srad)
else:
assert False
def GetDispersion(self, sx, sy, srad):
"""
*(static method)* Computes the dispersion matrix from the x- and y-direction dispersions and the dispersion rotation angle (radians).
Parameters
----------
sx : dispersion along the x axis
sy : dispersion along the y axis
srad : rotation angle of the dispersion relative to the x axis
Returns
----------
S : numpy.ndarray :: shape=(2,2), det=1, dtype=float
Dispersion matrix
"""
return np.matmul(RMatrix(srad), np.array([[sx,0],[0,sy]]))
def GetStaticPDF(self, X,Y, x0, y0, S):
"""
*(static method)* Evaluates the value from the mathematical definition.
Parameters
----------
X : int or float or numpy.ndarray
x value(s) of the positions to evaluate
Y : int or float or numpy.ndarray
y value(s) of the positions to evaluate
x0 : int or float
x center of the Gaussian
y0 : int or float
y center of the Gaussian
S : numpy.ndarray :: shape=(2,2)
Dispersion matrix
Returns
-------
Z : numpy.ndarray :: shape=X.shape
Matrix of magnitudes at the X and Y positions
"""
X1 = X-x0
Y1 = Y-y0
Si = np.linalg.inv(S)
X_ = Si[0][0] * X1 + Si[0][1] * Y1
Y_ = Si[1][0] * X1 + Si[1][1] * Y1
return np.exp(-0.5*(X_**2+Y_**2))/(2*np.pi*np.linalg.det(S))
def GetPDF(self, x, y):
"""
Computes the actual probability distribution of the Gaussian signal by substituting the object's center position ``(x0, y0)`` and dispersion matrix ``S`` into the mathematical definition ``(StaticPDF)``.
Parameters
----------
x: int or float or np.ndarray
y: int or float or np.ndarray (assert y.shape==x.shape)
Returns
-------
result: numpy.ndarray :: shape=x.shape (if int or float (1,))
Probability value at the position ``x, y``
"""
return self.GetStaticPDF(x, y, self.x0, self.y0, self.S)
def GetSignal(self, x, y):
return self.amplitude * self.GetPDF(x,y)
"""# Digitizer 정의
Digitizer : 시그널을 검출기의 신호로 변환하는 것.
## 초기화
처음 정의할 때 필요한 것
다음 둘 중 하나만 하면 됨. (둘 다 하면, 1번이 인식됨. 필요시 변수 이름을 특정하여 적당한 ``__init__`` 의 변수만 지정 할 것.)
1. 최소, 최대 x,y 값과 양자화수
* x최소값, x최대값, x칸수, y최소값, y최대값, y칸수
2. meshgrid 를 통해 생성한 X 와 Y 위치
## 작동요소
1. ``InjectSignal(signal)`` 변수가 모두 들어가서 적절히 정의가 된 signal 을 주입하면, Digitizer 내의 signals 변수에 리스팅된다.
2. ``GetAnalogSignal(X,Y)`` (``X,Y`` 는 meshgrid 를 통해 형성된 공간좌표값) 을 하면, ``X,Y`` 로 주어진 공간에서의 아날로그 신호를 송출한다.
3. ``GetDigitalSignal_Old()`` 아날로그 시그널을 기준으로, ``Digitizer`` 에 정의된 디지털 그리드에 따라 해당 구역 내에 정의된 시그널 크기를 낸다. (그리드 센터의 값을 구한 다음에 면적을 곱하는 방식)
* ``GetDigitalSignal()`` 그리드 내부를 전부 적분하는 방식
신호가 매우 날카로워서 그리드 센터에 신호가 없을 때의 문제를 해결하기 위함. 그러나 *느림*.
4. ``GetDigitizedSignal()`` 디지털 시그널을 기준으로, ``Digitizer`` 의 디지털 그리드에, 각각의 시그널이 ``on`` 인지 ``off``인지 판단한다
"""
class Digitizer:
"""
Class that converts received signals into magnitudes on a specified grid and outputs which pixels fired and which did not.
It must be defined by either ``(xmin,xmax,xn,ymin,ymax,yn)`` or ``(X,Y)``.
The priority is as follows.
1. ``(xmin,xmax,xn,ymin,ymax,yn)``
2. ``(X,Y)``
Parameters
----------
xmin : int or float
Minimum coordinate value along the x axis
xmax : int or float
Maximum coordinate value along the x axis
xn : int
Number of grid divisions along the x axis
ymin : int or float
Minimum coordinate value along the y axis
ymax : int or float
Maximum coordinate value along the y axis
yn : int
Number of grid divisions along the y axis
X : numpy.ndarray
X grid created by numpy.meshgrid
Y : numpy.ndarray (assert X.shape==Y.shape)
Y grid created by numpy.meshgrid
Attributes
----------
self.threshold : int or float
Threshold used when converting the digital signal into the digitized (fired/not-fired) signal.
self.signals : list of Signal (and its subclass)
Collection that stores the original signals; every element of the list is a ``Signal``.
self.X : numpy.ndarray
X boundary values of the detector pixels
self.Y : numpy.ndarray (Y.shape == X.shape)
Y boundary values of the detector pixels
self.centerX : numpy.ndarray (centerX.shape == X.shape - (1,1))
X center values of the detector pixels
self.centerY : numpy.ndarray (centerY.shape == centerX.shape)
Y center values of the detector pixels
"""
def __init__(self, xmin=None, xmax=None, xn=None, ymin=None, ymax=None, yn=None, # Spanning with x size, y size, number of their bins
X=None, Y=None, # Spanning with X,Y matrix (meshgrid)
threshold=25
):
self.threshold = threshold
self.signals = list()
if xmin is not None and xmax is not None and xn is not None and ymin is not None and ymax is not None and yn is not None:
self.__SpanningBin(xmin, xmax, xn, ymin, ymax, yn)
return
if X is not None and Y is not None:
self.__SpanningMeshgrid(X,Y)
return
assert False, "GridRange/nbin or Grid with meshed should be inputted."
def __SpanningBin(self, xmin, xmax, xn, ymin, ymax, yn):
"""
Parameters
----------
xmin : int or float
Minimum coordinate value along the x axis
xmax : int or float
Maximum coordinate value along the x axis
xn : int
Number of grid divisions along the x axis
ymin : int or float
Minimum coordinate value along the y axis
ymax : int or float
Maximum coordinate value along the y axis
yn : int
Number of grid divisions along the y axis
returns
-------
None
"""
X, Y = np.meshgrid(np.linspace(xmin, xmax, xn+1), np.linspace(ymin, ymax, yn+1))
self.__SpanningMeshgrid(X,Y)
def __SpanningMeshgrid(self, X,Y):
"""
Parameters
----------
X : numpy.ndarray
X grid created by numpy.meshgrid
Y : numpy.ndarray (assert X.shape==Y.shape)
Y grid created by numpy.meshgrid
returns
-------
None
"""
assert X.shape == Y.shape
self.X = X
self.Y = Y
self.centerX = 0.5*(X[:,:-1]+X[:,1:])[:-1,:]
self.centerY = 0.5*(Y[:-1,:]+Y[1:,:])[:,:-1]
def InjectSignal(self, signal):
"""
Method that injects a signal into the Digitizer.
Parameters
----------
signal : Signal or its subclass
Returns
-------
None
Raises
------
AssertionError
The injected parameter is not a Signal or a subclass of Signal.
See Also
--------
.Signal : for Injecting Signal (but it is null, pure-class)
.GaussianSignal : Example (not-null) signal
"""
assert isinstance(signal, Signal)
# self.AnalogSignal = self.AnalogSignal + signal.GetSignal(self.X,self.Y)
self.signals.append(signal)
def ClearSignal(self):
"""
Removes all signals injected via ``InjectSignal``, resetting the Digitizer.
Parameters
----------
None
Returns
-------
None
"""
self.signals.clear()
def GetAnalogSignal(self, X, Y):
"""
Computes the signal value at the given X and Y coordinates from the signals injected via ``InjectSignal``.
Parameters
----------
X : int or float or numpy.ndarray
X coordinate(s) of the positions to evaluate
Y : int or float or numpy.ndarray
Y coordinate(s) of the positions to evaluate
Returns
-------
AnalogSignal : signal magnitude at the requested position(s) (scalar or ndarray)
Raises
------
AssertionError
The positional parameters ``X`` and ``Y`` are ``numpy.ndarray`` but their shapes differ.
"""
if isinstance(X,np.ndarray) or isinstance(Y,np.ndarray):
assert X.shape == Y.shape
AnalogSignal = np.zeros(X.shape)
else:
AnalogSignal = 0
for signal in self.signals:
AnalogSignal += signal.GetSignal(X,Y)
return AnalogSignal
def GetDigitalSignal_Old(self):
centerSignal_ = self.GetAnalogSignal(self.centerX, self.centerY)
centerSignal = centerSignal_ * (PinMatrix(self.X) - PinMatrix(self.X, False, True))[1:-1,1:-1] * (PinMatrix(self.Y) - PinMatrix(self.Y, True, False))[1:-1,1:-1]
return centerSignal
def GetDigitalSignal_List(self):
OnPixels = self.GetDigitalSignal_Old()>0.1
x = self.centerX[OnPixels]
y = self.centerY[OnPixels]
xmin = self.X[PinMatrixTF(OnPixels, True, True)]
xmax = self.X[PinMatrixTF(OnPixels, False, True)]
ymin = self.Y[PinMatrixTF(OnPixels, True, True)]
ymax = self.Y[PinMatrixTF(OnPixels, True, False)]
X, Y = np.meshgrid(np.arange(0, self.centerX.shape[1], 1), np.arange(0, self.centerX.shape[0], 1))
X, Y = (X[OnPixels], Y[OnPixels])
assert xmin.size == x.size
assert xmin.size == y.size
assert xmin.size == xmax.size
assert xmin.size == ymin.size
assert xmin.size == ymax.size
ansX = list()
ansY = list()
ansZ = list()
for i in range(xmin.size):
ansZ.append(dblquad(lambda x,y : self.GetAnalogSignal(x,y), ymin[i], ymax[i], xmin[i], xmax[i])[0])
ansX.append(X[i])
ansY.append(Y[i])
return np.array(ansX), np.array(ansY), np.array(ansZ)
def GetDigitalSignal(self):
iX,iY,iZ = self.GetDigitalSignal_List()
Z = np.zeros(self.centerX.shape)
for i in range(iX.size):
Z[iY[i], iX[i]] = iZ[i]
return Z
def GetDigitizedSignal_Old(self):
return self.GetDigitalSignal_Old()>self.threshold
def GetDigitizedSignal_List(self):
X,Y,Z = self.GetDigitalSignal_List()
return X[Z>self.threshold], Y[Z>self.threshold]
def GetDigitizedSignal(self):
return self.GetDigitalSignal()>self.threshold
|
the-stack_106_30369 | """
DCSO TIE2MISP Parser
Copyright (c) 2017, DCSO GmbH
"""
import datetime
import json
import uuid
from abc import ABCMeta, abstractstaticmethod, abstractmethod
from .misp_attribute import MISPAttribute
from pymisp import PyMISP
import logging
class MISPEvent(metaclass=ABCMeta):
def __init__(self, organisation_name, organisation_uuid, threat_level_id, published, info, date):
dt = datetime.datetime.now()
if not organisation_name or not organisation_uuid:
raise ValueError('Organisation Name and UUID must be set')
if not threat_level_id:
raise ValueError('Threat Level must be set')
if not info:
raise ValueError('Info must be set')
self.__Info = date.strftime("%Y%m%d ") + info
self.__PublishTimestamp = dt.strftime("%s")
self.__Timestamp = dt.strftime("%s")
self.__Analysis = 2
self.__Attribute = list()
self.__Tags = list()
self.__Published = published
self.__Orgc = {'name': organisation_name, 'uuid': organisation_uuid}
self.__Threat_Level_ID = threat_level_id
self.__UUID = uuid.uuid1()
self.__Date = dt.strftime("%Y-%m-%d")
# Getter
@property
def uuid(self):
return self.__UUID
@property
def threat_level_id(self):
return self.__Threat_Level_ID
@property
def published(self):
return self.__Published
@property
def publish_timestamp(self):
return self.__PublishTimestamp
@property
def timestamp(self):
return self.__Timestamp
@property
def attributes(self):
return self.__Attribute
@property
def analysis(self):
return self.__Analysis
@property
def tags(self):
return self.__Tags
@property
def orgc(self):
return self.__Orgc
@property
def date(self):
return self.__Date
@property
def info(self):
return self.__Info
# Setter
@published.setter
def published(self, value):
self.__Published = value
@threat_level_id.setter
def threat_level_id(self, value):
self.__Threat_Level_ID = value
@analysis.setter
def analysis(self, value):
self.__Analysis = value
@staticmethod
@abstractstaticmethod
def parse(misp_event, val, tags):
pass
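# Concrete event types are expected to fill in `parse`; an illustrative sketch only
# (the attribute construction depends on the MISPAttribute subclass in use):
#
#   @staticmethod
#   def parse(misp_event, val, tags):
#       misp_event.append_attribute(SomeConcreteAttribute(val))
#       for tag in tags:
#           misp_event.append_tags(tag)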
def serialize(self):
json_object = dict()
json_object['info'] = self.info
json_object['publish_timestamp'] = self.publish_timestamp
json_object['timestamp'] = self.timestamp
json_object['analysis'] = self.analysis
list_attr = list()
for item in self.attributes:
list_attr.append(item.serialize())
list_tags = list()
for item in self.tags:
#list_tags.append(item.serialize())
list_tags.append(item)
json_object['Attribute'] = list_attr
json_object['Tag'] = list_tags
json_object['published'] = self.published
json_object['date'] = self.date
json_object['Orgc'] = self.orgc
json_object['threat_level_id'] = self.threat_level_id
json_object['uuid'] = str(self.uuid)
return json.dumps({"Event": json_object})
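# The serialized payload has the shape
# {"Event": {"info": ..., "Attribute": [...], "Tag": [...], "Orgc": {...}, ...}}.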
# Attributes handling
def append_attribute(self, attribute):
if not isinstance(attribute, MISPAttribute):
raise ValueError('attribute must be a child of Model.MISPAttribute')
self.__Attribute.append(attribute)
# Tag handling
def append_tags(self, tag):
self.__Tags.append(tag)
# PyMISP Functions
def upload(self, config, proxy_addr_misp):
misp = PyMISP(config.misp_api_url, config.misp_api_key, False, debug=False, proxies=proxy_addr_misp)
event = misp.new_event(0, config.event_base_thread_level, 2, self.info)
# Upload all given event tags
for tag in self.tags:
misp.tag(event['Event']['uuid'], tag)
index = 1
length = len(self.attributes)
logging.info("Uploading " + str(length) + " Attributes ")
for attr in self.attributes:
if index % 10 == 0 or index == length:
logging.info('Attribute: ' + str(index) + ' from ' + str(length))
try:
attr.upload(misp, event, config)
except ValueError as e:
if len(e.args) > 0:
logging.warning(e.args[0])
else:
logging.warning('Unknown error occurred while uploading attributes')
index += 1
|
the-stack_106_30370 | from Utility.Types.Task.Task import Task
class MeasurementBasedOutlierRemovalTask(Task):
def __init__(self,
nvm_file_object='',
min_measurements=None,
output_file_name_stem=''):
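# Store every constructor argument as an instance attribute in one step via locals().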
self.__dict__.update(locals())
del self.self # redundant (and a circular reference)
|
the-stack_106_30371 | import torch
import numpy as np
import copy
from PatchMatchOrig import init_nnf, upSample_nnf, avg_vote, propagate, reconstruct_avg
from VGG19 import VGG19
from utils import *
def deep_image_analogy(A, BP, config, writer):
alphas = config['alpha']
nnf_patch_size = config['nnf_patch_size']
radii = config['radii']
params = config['params']
lr = config['lr']
# preparing data
img_A_tensor = torch.FloatTensor(A.transpose(2, 0, 1)).cuda()
img_BP_tensor = torch.FloatTensor(BP.transpose(2, 0, 1)).cuda()
# fake a batch dimension
img_A_tensor = img_A_tensor.unsqueeze(0)
img_BP_tensor = img_BP_tensor.unsqueeze(0)
# 4.1 Preprocessing Step
model = VGG19()
F_A, F_A_size = model.get_features(img_tensor=img_A_tensor.clone(), layers=params['layers'])
F_BP, F_B_size = model.get_features(img_tensor=img_BP_tensor.clone(), layers=params['layers'])
# Init AP&B 's feature maps with F_A&F_BP
F_AP = copy.deepcopy(F_A)
F_B = copy.deepcopy(F_BP)
# Note that the feature maps are now in the order [5, 4, 3, 2, 1, input]
for curr_layer in range(5):
# ANN init step: the coarsest layer is initialized randomly,
# other layers are initialized using the upsampling technique described in the paper
print('='*30 + 'deep analogy on layer{}'.format(curr_layer) + '='*30)
if curr_layer == 0:
# initialize NNF
ann_AB = init_nnf(F_A_size[curr_layer][2:], F_B_size[curr_layer][2:])
ann_BA = init_nnf(F_B_size[curr_layer][2:], F_A_size[curr_layer][2:])
else:
# NNF upsampling
ann_AB = upSample_nnf(ann_AB, F_A_size[curr_layer][2:])
ann_BA = upSample_nnf(ann_BA, F_B_size[curr_layer][2:])
# According to Equation (2), we need to normalize F_A and F_BP
# response denotes the M in Equation (6)
F_A_BAR, response_A = normalize(F_A[curr_layer])
F_BP_BAR, response_BP = normalize(F_BP[curr_layer])
# F_AP & F_B are reconstructed according to Equation (4)
# Note that we reuse the variable F_AP here,
# it denotes the R_B' term from Equation (4), which is calculated
# at the end of the previous iteration
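# blend() (defined in utils) is assumed to compute a weight map W = alpha * M from the
# response and return W * F + (1 - W) * R, i.e. the weighted combination of Eq. (4).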
F_AP[curr_layer] = blend(response_A, F_A[curr_layer], F_AP[curr_layer], alphas[curr_layer])
F_B[curr_layer] = blend(response_BP, F_BP[curr_layer], F_B[curr_layer], alphas[curr_layer])
visualize_deep_image_analogy(writer, F_A, F_AP, F_B, F_BP, curr_layer)
# Normalize F_AP&F_B as well
F_AP_BAR, _ = normalize(F_AP[curr_layer])
F_B_BAR, _ = normalize(F_B[curr_layer])
# Run PatchMatch algorithm to get mapping AB and BA
ann_AB, _ = propagate(ann_AB, ts2np(F_A_BAR), ts2np(F_AP_BAR), ts2np(F_B_BAR), ts2np(F_BP_BAR),
nnf_patch_size[curr_layer],params['iter'], radii[curr_layer])
ann_BA, _ = propagate(ann_BA, ts2np(F_BP_BAR), ts2np(F_B_BAR), ts2np(F_AP_BAR), ts2np(F_A_BAR),
nnf_patch_size[curr_layer],params['iter'], radii[curr_layer])
if curr_layer >= 4:
break
# The code below is used to initialize the F_AP&F_B in the next layer,
# it generates the R_B' and R_A as stated in Equation (4)
# R_B' is stored in F_AP, R_A is stored in F_B
# using backpropagation to approximate feature
# About why we add 2 here:
# https://github.com/msracver/Deep-Image-Analogy/issues/30
next_layer = curr_layer + 2
ann_AB_upnnf2 = upSample_nnf(ann_AB, F_A_size[next_layer][2:])
ann_BA_upnnf2 = upSample_nnf(ann_BA, F_B_size[next_layer][2:])
F_AP_np = avg_vote(ann_AB_upnnf2, ts2np(F_BP[next_layer]), nnf_patch_size[next_layer], F_A_size[next_layer][2:],
F_B_size[next_layer][2:])
F_B_np = avg_vote(ann_BA_upnnf2, ts2np(F_A[next_layer]), nnf_patch_size[next_layer], F_B_size[next_layer][2:],
F_A_size[next_layer][2:])
# Initialize R_B' and R_A
F_AP[next_layer] = np2ts(F_AP_np)
F_B[next_layer] = np2ts(F_B_np)
# Warp F_BP using ann_AB, Warp F_A using ann_BA
target_BP_np = avg_vote(ann_AB, ts2np(F_BP[curr_layer]), nnf_patch_size[curr_layer], F_A_size[curr_layer][2:],
F_B_size[curr_layer][2:])
target_A_np = avg_vote(ann_BA, ts2np(F_A[curr_layer]), nnf_patch_size[curr_layer], F_B_size[curr_layer][2:],
F_A_size[curr_layer][2:])
target_BP = np2ts(target_BP_np)
target_A = np2ts(target_A_np)
#LBFGS algorithm to approximate R_B' and R_A
F_AP[curr_layer+1] = model.get_deconvoluted_feat(writer, target_BP, curr_layer, F_AP[next_layer], feat_name='BP', lr=lr[curr_layer],
blob_layers=params['layers'])
F_B[curr_layer+1] = model.get_deconvoluted_feat(writer, target_A, curr_layer, F_B[next_layer], feat_name='A', lr=lr[curr_layer],
blob_layers=params['layers'])
if type(F_B[curr_layer + 1]) == torch.DoubleTensor:
F_B[curr_layer + 1] = F_B[curr_layer + 1].type(torch.FloatTensor)
F_AP[curr_layer + 1] = F_AP[curr_layer + 1].type(torch.FloatTensor)
elif type(F_B[curr_layer + 1]) == torch.cuda.DoubleTensor:
F_B[curr_layer + 1] = F_B[curr_layer + 1].type(torch.cuda.FloatTensor)
F_AP[curr_layer + 1] = F_AP[curr_layer + 1].type(torch.cuda.FloatTensor)
# Obtain the output according to 4.5
print('='*30 + 'deep analogy on layer5' + '='*30)
img_AP = reconstruct_avg(ann_AB, BP, nnf_patch_size[curr_layer], F_A_size[curr_layer][2:], F_B_size[curr_layer][2:])
img_B = reconstruct_avg(ann_BA, A, nnf_patch_size[curr_layer], F_A_size[curr_layer][2:], F_B_size[curr_layer][2:])
img_AP = np.clip(img_AP/255.0, 0, 1)[:,:,::-1]
img_B = np.clip(img_B/255.0, 0, 1)[:,:,::-1]
return img_AP, img_B
def visualize_deep_image_analogy(writer, F_A, F_AP, F_B, F_BP, curr_layer):
from torchvision.utils import make_grid
name = ['F_A', 'F_AP', 'F_B', 'F_BP']
for i, F in enumerate([F_A, F_AP, F_B, F_BP]):
img = F[curr_layer].detach()[0, :10].unsqueeze(1)
img = make_grid(img, nrow=5, normalize=True)
writer.add_image("feature map {}".format(name[i]), img, curr_layer)
|
the-stack_106_30373 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Hiroyuki Takagi
# Code copied and adapted from pyppeteer (MIT License)
# See for pyppeteer package: https://github.com/pyppeteer/pyppeteer
# See for original code: https://github.com/pyppeteer/pyppeteer/blob/46f04c66c109353e08d873a1019df1cf4dac9dea/pyppeteer/chromium_downloader.py
"""Chromium download module."""
from io import BytesIO
from tools.clog import CLogger
import os
from pathlib import Path
import stat
import sys
import platform
from zipfile import ZipFile
import urllib3
from tqdm import tqdm
import pathlib
log = CLogger("chromium")
log.set_prefix("download")
def current_platform() -> str:
"""Get current platform name by short string."""
if sys.platform.startswith('linux'):
return 'linux'
elif sys.platform.startswith('darwin'):
if "arm" in platform.processor().lower():
return 'mac-arm'
else:
return 'mac'
elif (
sys.platform.startswith('win')
or sys.platform.startswith('msys')
or sys.platform.startswith('cyg')
):
if sys.maxsize > 2 ** 31 - 1:
return 'win64'
return 'win32'
raise OSError('Unsupported platform: ' + sys.platform)
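# e.g. returns 'linux' on Linux, 'mac-arm' on Apple Silicon, 'mac' on Intel macOS,
# 'win64' for 64-bit Python on Windows and 'win32' for 32-bit Python builds.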
DEFAULT_CHROMIUM_REVISION = '869685'
chromium_revision = os.environ.get(
'CHROMIUM_REVISION', DEFAULT_CHROMIUM_REVISION
)
vaccipy_dir = pathlib.Path(__file__).parent.absolute()
DOWNLOADS_FOLDER = Path(vaccipy_dir) / 'local-chromium'
DEFAULT_DOWNLOAD_HOST = 'https://storage.googleapis.com'
DOWNLOAD_HOST = os.environ.get(
'PYPPETEER_DOWNLOAD_HOST', DEFAULT_DOWNLOAD_HOST
)
BASE_URL = f'{DOWNLOAD_HOST}/chromium-browser-snapshots'
REVISION = os.environ.get('PYPPETEER_CHROMIUM_REVISION', chromium_revision)
NO_PROGRESS_BAR = os.environ.get('PYPPETEER_NO_PROGRESS_BAR', '')
if NO_PROGRESS_BAR.lower() in ('1', 'true'):
NO_PROGRESS_BAR = True # type: ignore
# Windows archive name changed at r591479.
windowsArchive = 'chrome-win' if int(REVISION) > 591479 else 'chrome-win32'
downloadBinURLs = {
'linux': f'{BASE_URL}/Linux_x64/{REVISION}/chrome-linux.zip',
'mac': f'{BASE_URL}/Mac/{REVISION}/chrome-mac.zip',
'mac-arm': f'{BASE_URL}/Mac_Arm/{REVISION}/chrome-mac.zip',
'win32': f'{BASE_URL}/Win/{REVISION}/{windowsArchive}.zip',
'win64': f'{BASE_URL}/Win_x64/{REVISION}/{windowsArchive}.zip',
}
downloadWebdriverURLs = {
'linux': f'{BASE_URL}/Linux_x64/{REVISION}/chromedriver_linux64.zip',
'mac': f'{BASE_URL}/Mac/{REVISION}/chromedriver_mac64.zip',
'mac-arm': f'{BASE_URL}/Mac_Arm/{REVISION}/chromedriver_mac64.zip',
'win32': f'{BASE_URL}/Win/{REVISION}/chromedriver_win32.zip',
'win64': f'{BASE_URL}/Win_x64/{REVISION}/chromedriver_win32.zip',
}
chromiumExecutable = {
'linux': DOWNLOADS_FOLDER / REVISION / 'chrome-linux' / 'chrome',
'mac': (
DOWNLOADS_FOLDER
/ REVISION
/ 'chrome-mac'
/ 'Chromium.app'
/ 'Contents'
/ 'MacOS'
/ 'Chromium'
),
'mac-arm': (
DOWNLOADS_FOLDER
/ REVISION
/ 'chrome-mac'
/ 'Chromium.app'
/ 'Contents'
/ 'MacOS'
/ 'Chromium'
),
'win32': DOWNLOADS_FOLDER / REVISION / windowsArchive / 'chrome.exe',
'win64': DOWNLOADS_FOLDER / REVISION / windowsArchive / 'chrome.exe',
}
webdriverExecutable = {
'linux': DOWNLOADS_FOLDER
/ REVISION
/ 'chromedriver_linux64'
/ 'chromedriver',
'mac': DOWNLOADS_FOLDER / REVISION / 'chromedriver_mac64' / 'chromedriver',
'mac-arm': DOWNLOADS_FOLDER
/ REVISION
/ 'chromedriver_mac64'
/ 'chromedriver',
'win32': DOWNLOADS_FOLDER
/ REVISION
/ 'chromedriver_win32'
/ 'chromedriver.exe',
'win64': DOWNLOADS_FOLDER
/ REVISION
/ 'chromedriver_win32'
/ 'chromedriver.exe',
}
def get_url(binary: str) -> str:
"""Get download url."""
if binary == 'chromium':
return downloadBinURLs[current_platform()]
elif binary == 'webdriver':
return downloadWebdriverURLs[current_platform()]
def download_zip(url: str, binary: str) -> BytesIO:
"""Download data from url."""
log.info(
f'Starting the download of {binary}. This can take a few minutes.'
)
# Uncomment the statement below to disable HTTPS warnings and allow
# download without certificate verification. This is *strongly* discouraged, as it
# opens the code to man-in-the-middle (and other) vulnerabilities; see
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings
# for more.
# urllib3.disable_warnings()
with urllib3.PoolManager() as http:
# Get data from url.
# set preload_content=False means using stream later.
r = http.request('GET', url, preload_content=False)
if r.status >= 400:
raise OSError(
f'{binary} downloadable not found at {url}: '
f'Received {r.data.decode()}.\n'
)
# Stream the response in 10 KiB (10 * 1024 byte) chunks.
_data = BytesIO()
if NO_PROGRESS_BAR:
for chunk in r.stream(10240):
_data.write(chunk)
else:
try:
total_length = int(r.headers['content-length'])
except (KeyError, ValueError, AttributeError):
total_length = 0
process_bar = tqdm(total=total_length)
for chunk in r.stream(10240):
_data.write(chunk)
process_bar.update(len(chunk))
process_bar.close()
print()
log.info(f'Download of {binary} finished.')
return _data
def extract_zip(data: BytesIO, path: Path, binary: str) -> None:
"""Extract zipped data to path."""
# On mac zipfile module cannot extract correctly, so use unzip instead.
if current_platform() == 'mac':
import subprocess
import shutil
zip_path = path / 'temp.zip'
if not path.exists():
path.mkdir(parents=True)
with zip_path.open('wb') as f:
f.write(data.getvalue())
if not shutil.which('unzip'):
raise OSError(
f'Failed to automatically extract {binary}.'
f'Please unzip {zip_path} manually.'
)
proc = subprocess.run(
['unzip', str(zip_path)],
cwd=str(path),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
if proc.returncode != 0:
log.error(proc.stdout.decode())
raise OSError(f'Failed to unzip {zip_path}.')
if chromium_executable().exists() and zip_path.exists():
zip_path.unlink()
else:
with ZipFile(data) as zf:
zf.extractall(str(path))
if binary == 'chromium':
exec_path = chromium_executable()
elif binary == 'webdriver':
exec_path = webdriver_executable()
if not exec_path.exists():
raise IOError(f'Failed to extract {binary}.')
exec_path.chmod(
exec_path.stat().st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IXUSR
)
log.info(f"{binary} exportiert nach '{path}'")
def download_chromium(binary='chromium') -> None:
"""Download and extract chromium."""
extract_zip(
download_zip(get_url(binary), binary),
DOWNLOADS_FOLDER / REVISION,
binary,
)
def download_webdriver(binary='webdriver') -> None:
"""Download and extract webdriver."""
extract_zip(
download_zip(get_url(binary), binary),
DOWNLOADS_FOLDER / REVISION,
binary,
)
def chromium_executable() -> Path:
"""Get path of the chromium executable."""
return chromiumExecutable[current_platform()]
def webdriver_executable() -> Path:
"""Get path of the webdriver executable."""
return webdriverExecutable[current_platform()]
def check_chromium() -> bool:
"""Check if chromium is placed at correct path."""
return chromium_executable().exists()
def check_webdriver() -> bool:
"""Check if webdriver is placed at correct path."""
return webdriver_executable().exists()
if __name__ == '__main__':
if not check_chromium():
download_chromium()
if not check_webdriver():
download_webdriver()
|
the-stack_106_30375 | import unittest
from mdde.config import ConfigRegistry
class ConfigTestCase(unittest.TestCase):
TEST_CONFIG_FILE = '../../debug/registry_config.yml'
def test_initialization(self):
config_container = ConfigRegistry()
config_container.read(self.TEST_CONFIG_FILE)
for node in config_container.get_nodes():
print("{} | {}".format(node.id, node.default))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_30376 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def nhwc_to_format(x, data_format):
"""Converts a numpy array from NHWC format to `data_format`."""
rank = len(x.shape)
if data_format == "NCHW":
return np.transpose(x, [0, rank - 1] + list(range(1, rank - 1)))
elif data_format == "NHWC":
return x
else:
raise ValueError("Unknown format {}".format(data_format))
class UnaryOpsTest(xla_test.XLATestCase):
"""Test cases for unary operators."""
def _assertOpOutputMatchesExpected(self,
op,
inp,
expected,
equality_test=None,
rtol=1e-3,
atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.cached_session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
if equality_test is None:
self.assertAllCloseAccordingToType(
result, expected, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
else:
equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
def testAllTypeOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array(
[[[[1, 0], [0, 0]], [[0, 2], [0, 0]]], [[[0, 0], [3, 0]],
[[0, 0], [0, 4]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.identity,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([[[1, 0], [0, 2]], [[3, 0], [0, 4]]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag,
np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=dtype),
np.array(
[[[[1, 0, 0], [0, 2, 0], [0, 0, 3]], [[4, 0, 0], [0, 5, 0], [
0, 0, 6
]]], [[[7, 0, 0], [0, 8, 0], [0, 0, 9]], [[10, 0, 0], [0, 11, 0],
[0, 0, 12]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag_part,
np.arange(3 * 2 * 4).reshape([3, 2, 4]).astype(dtype),
np.array([[0, 5], [8, 13], [16, 21]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.prevent_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[[[]]]]], dtype=dtype),
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1], [2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1]], [[2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.stop_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
def testFloatOps(self):
for dtype in self.float_types:
# TODO(b/77694432): Half test failed on CPU, last ran on 04-06-2018.
if dtype == np.float16 and self.device == "XLA_CPU":
continue
x = np.arange(-0.90, 0.90, 0.25)
self._assertOpOutputMatchesExpected(
math_ops.acos, x.astype(dtype), expected=np.arccos(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.asin, x.astype(dtype), expected=np.arcsin(x).astype(dtype))
x = np.arange(-3, 3).reshape(1, 3, 2)
self._assertOpOutputMatchesExpected(
math_ops.atan, x.astype(dtype), expected=np.arctan(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0, 1.3169579, 1.76274717, 2.06343707], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[0.88137359, 1.44363548, 1.81844646, 2.09471255], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
expected=np.array(
[0.10033535, 0.20273255, 0.3095196, 0.42364893], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.ceil,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-1, 2]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.54308063, 3.76219569, 10.067662, 27.30823284], dtype=dtype))
# Disable float16 testing for now
if dtype != np.float16:
x = np.arange(-10, 10, 1).astype(dtype)
with self.cached_session() as session:
erf_x = session.run(math_ops.erf(x))
erfc_x = session.run(math_ops.erfc(x))
self._assertOpOutputMatchesExpected(math_ops.erf, x, expected=erf_x)
self._assertOpOutputMatchesExpected(math_ops.erfc, x, expected=erfc_x)
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype),
rtol=1e-5)
self._assertOpOutputMatchesExpected(
math_ops.floor,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
# Tests for tf.nn ops.
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
self._assertOpOutputMatchesExpected(nn_ops.l2_loss, dtype(4), dtype(8))
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[1, 0.5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0, 0.69314718]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.841478, 0.909302]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.540297, -0.41614]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]], dtype=dtype)),
rtol=1e-4,
atol=1e-6)
self._assertOpOutputMatchesExpected(
math_ops.rint,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.round,
np.array(
[[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]],
dtype=dtype),
expected=np.array(
[[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
np.array([[4, 16]], dtype=dtype),
expected=np.array([[0.5, 0.25]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([-300, -150, 0, 150, 300], dtype=dtype),
expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.17520119, 3.62686041, 10.01787493, 27.2899172], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
np.array([[4, 9]], dtype=dtype),
expected=np.array([[2, 3]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array(
[1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.76159418, 0.76159418, 0.76159418, 0.76159418],
[0.76159418, 0.96402758, 0.99505478, 0.99932933]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.elu,
np.array([[-1, 0, 1, -1e-6]], dtype=dtype),
expected=np.array([[-0.63212056, 0, 1, -9.999995e-07]], dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.selu,
np.array([[-1, 0, 1, -1e-5]], dtype=dtype),
expected=np.array(
[[-1.11133074, 0., 1.05070099, -1.758090550379974e-05]],
dtype=dtype),
rtol=1e-5,
atol=1e-6)
self._assertOpOutputMatchesExpected(
nn_ops.relu,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu6,
np.array([[-0.05, 6.05, 5]], dtype=dtype),
expected=np.array([[0, 6, 5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([0.032058604, 0.087144323, 0.23688284, 0.64391428],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array([[[1, 1], [1, 1]], [[1, 2], [3, 4]]], dtype=dtype),
expected=np.array(
[[[0.5, 0.5], [0.5, 0.5]],
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softsign,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array(
[[-0.66666669, -0.5, 0, 0.5, 0.66666669]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype),
expected=np.array(
[[True, False, True], [False, True, True]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(0.5, dtype=dtype),
expected=np.array(np.log(np.pi) / 2, dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array(
[[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
[-3 / 2, -7 / 2, -11 / 2]],
dtype=dtype),
expected=np.array(
[
[0, 0, np.log(2.0)],
[np.log(6.0), np.log(24.0),
np.log(120)],
[
np.log(np.pi) / 2,
np.log(np.pi) / 2 - np.log(2),
np.log(np.pi) / 2 - np.log(4) + np.log(3)
],
[
np.log(np.pi) / 2 - np.log(3) + np.log(4),
np.log(np.pi) / 2 - np.log(105) + np.log(16),
np.log(np.pi) / 2 - np.log(10395) + np.log(64),
],
],
dtype=dtype))
# The actual result is complex. Take the real part.
self._assertOpOutputMatchesExpected(
math_ops.lgamma,
np.array([-1 / 2, -5 / 2, -9 / 2], dtype=dtype),
expected=np.array(
[
np.log(np.pi) / 2 + np.log(2),
np.log(np.pi) / 2 - np.log(15) + np.log(8),
np.log(np.pi) / 2 - np.log(945) + np.log(32),
],
dtype=dtype),
atol=1e-4)
self._assertOpOutputMatchesExpected(
math_ops.digamma,
np.array(
[[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
[6.0, 8.0, 9.0]],
dtype=dtype),
expected=np.array(
[
[
-np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
-np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
np.euler_gamma
],
[
-np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
-np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
3 * np.log(3) / 2 - np.euler_gamma,
-np.pi / 2 - 4 * np.log(2) -
(np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
/ np.sqrt(2) - np.euler_gamma
],
[
1 - np.euler_gamma, 1.5 - np.euler_gamma,
11 / 6.0 - np.euler_gamma
],
[
137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
761 / 280.0 - np.euler_gamma
],
],
dtype=dtype))
def quantize_and_dequantize_v2(x):
return array_ops.quantize_and_dequantize_v2(
x, -127, 127, signed_input=True, num_bits=8)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v2,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def quantize_and_dequantize_v3(x):
return array_ops.quantize_and_dequantize_v3(
x, -127, 127, num_bits=8, signed_input=True, range_given=False)
self._assertOpOutputMatchesExpected(
quantize_and_dequantize_v3,
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
def testComplexOps(self):
for dtype in self.complex_types:
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arccosh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arcsinh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arctanh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)),
rtol=1e-6,
atol=1e-6)
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2j, 2 + 3j]], dtype=dtype),
expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
expected=np.log1p(
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)),
rtol=1e-4,
atol=1e-6)
val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.rsqrt, val, expected=1 / np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))
self._assertOpOutputMatchesExpected(
math_ops.sqrt, val, expected=np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
ctypes = {np.complex64: np.float32}
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1 + 2j, -3j]], dtype=dtype),
expected=np.array([[1 - 2j, 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.angle,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.conj,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.imag,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.real,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
def testIntOps(self):
for dtype in self.int_types:
self._assertOpOutputMatchesExpected(
bitwise_ops.invert,
np.array([0, -1, 1, 16, 42], dtype=dtype),
expected=np.array([-1, 0, -2, -17, -43], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types - {np.int8, np.uint8}:
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[2, -1]], dtype=dtype),
expected=np.array([[2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2, 3]], dtype=dtype),
expected=np.array([[4, 9]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
# TODO(phawkins): these tests fail unless fastmath optimizations
# are disabled. Use more robust IsInf/IsNaN detection and enable these
# tests.
@unittest.skip("test case fails in fast-math mode")
def testIsInfAndIsNan(self):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.is_inf,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.is_nan,
np.array(
[[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype),
expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
def testBiasAddGrad(self):
self._assertOpOutputMatchesExpected(
gen_nn_ops.bias_add_grad,
np.array([[1., 2.], [3., 4.]], dtype=np.float32),
expected=np.array([4., 6.], dtype=np.float32))
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
np.array(
[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dtype=np.float32),
expected=np.array([10., 26.], dtype=np.float32))
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
types = (
set([dtypes.bool, dtypes.int32, dtypes.float32])
| self.complex_tf_types)
for shape in shapes:
for src_type in types:
for dst_type in types:
src = np.arange(np.prod(shape)).astype(src_type.as_numpy_dtype)
if src_type in self.complex_tf_types:
src += (np.arange(np.prod(shape)) * 2j).astype(
src_type.as_numpy_dtype)
src = src.reshape(shape)
dst = src.astype(dst_type.as_numpy_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
def testBitcast(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1, 0x3f800000], np.int32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.float32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1e-45, 1.0], np.float32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1e-45, 1.0], np.float32),
expected=np.array([1, 0x3f800000], np.int32))
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array([-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
def testUnpack(self):
self._assertOpOutputMatchesExpected(
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
np.array([3., 4.], dtype=np.float32),
np.array([5., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
self._assertOpOutputMatchesExpected(
lambda x: array_ops.unstack(x, axis=1),
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 3., 5.], dtype=np.float32),
np.array([2., 4., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
def testDepthToSpace(self):
def make_op(data_format):
def op(x):
return array_ops.depth_to_space(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format))
def testSpaceToDepth(self):
def make_op(data_format):
def op(x):
return array_ops.space_to_depth(
x, block_size=2, data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype), data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3, 4]]]], dtype=dtype), data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array(
[[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]],
dtype=dtype), data_format),
expected=nhwc_to_format(
np.array(
[[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]],
dtype=dtype), data_format))
def _assertSoftplusMatchesExpected(self, features, dtype):
features = np.array(features, dtype=dtype)
zero = np.asarray(0).astype(dtype)
expected = np.logaddexp(zero, features)
self._assertOpOutputMatchesExpected(
nn_ops.softplus, features, expected=expected, rtol=1e-6, atol=9.1e-6)
def testSoftplus(self):
for dtype in self.float_types:
self._assertSoftplusMatchesExpected([[-2, 0, 8]], dtype)
self._assertSoftplusMatchesExpected(
[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]], dtype)
if dtype == dtypes.bfloat16.as_numpy_dtype:
log_eps = np.log(np.finfo(np.float32).eps)
else:
log_eps = np.log(np.finfo(dtype).eps)
one = dtype(1)
ten = dtype(10)
self._assertSoftplusMatchesExpected([
log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten,
-log_eps, -log_eps - one, -log_eps + one, -log_eps - ten,
-log_eps + ten
], dtype)
if __name__ == "__main__":
googletest.main()
|
the-stack_106_30377 | from typing import Tuple, Union, Dict
import numpy as np
import torch
from pytorch_lightning.core.lightning import LightningModule
from src.data_loader.data_set import Data_Set
from src.data_loader.utils import convert_2_5D_to_3D
from torch import Tensor
from torch.utils.data import DataLoader
from tqdm import tqdm
def calculate_epe_statistics(
predictions: torch.tensor,
ground_truth: torch.tensor,
dim: int,
validitiy_flags=None,
) -> dict:
"""Calculates the eucledian diatnce statistics between the all coordinates. In case of 2.5 D
Args:
predictions (torch.tensor): Predicted coordinates of shape (#sample x 21 x 3)
ground_truth (torch.tensor): True coordinates of shape (#samples x 21 x3)
dim (int): denotes whether the predictions and ground truth are 2.5D or 3D.
If 2 is passed, only the x and y components are compared.
Returns:
dict: Returns a dictionary containing following keys
'mean_epe', 'median_epe', 'min_epe', 'max_epe'
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if dim == 2:
predictions_ = predictions[:, :, :2].clone()
ground_truth_ = ground_truth[:, :, :2].clone()
else:
if dim != 3:
print("Coordinates treated as 3D")
predictions_ = predictions.clone()
ground_truth_ = ground_truth.clone()
with torch.no_grad():
eucledian_dist = (
torch.sum(((predictions_.to(device) - ground_truth_.to(device)) ** 2), 2)
** 0.5
)
if validitiy_flags is not None:
mean_epe = torch.mean(eucledian_dist[validitiy_flags.view(-1, 21)])
median_epe = torch.median(eucledian_dist[validitiy_flags.view(-1, 21)])
max_epe = torch.max(eucledian_dist[validitiy_flags.view(-1, 21)])
min_epe = torch.min(eucledian_dist[validitiy_flags.view(-1, 21)])
else:
mean_epe = torch.mean(eucledian_dist)
median_epe = torch.median(eucledian_dist)
max_epe = torch.max(eucledian_dist)
min_epe = torch.min(eucledian_dist)
return {
"eucledian_dist": eucledian_dist,
"mean": mean_epe,
"median": median_epe,
"min": min_epe,
"max": max_epe,
}
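# For example (illustrative): calculate_epe_statistics(pred, gt, dim=2) compares only the
# pixel (x, y) components of 2.5D joints, while dim=3 compares the full 3D positions.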
def calculate_predicted_3D(
joints: torch.tensor, camera_params: torch.tensor, scales: torch.tensor
) -> torch.tensor:
"""calculates the 3D joints from 2.5D joints.
Args:
joints (torch.tensor): predicted joints in 2.5D (#sample x 21 x 3)
camera_params (torch.tensor): camera prameters (#sample x 3 x 3)
scales (torch.tensor): scale for the joints (#sample x 1)
Returns:
torch.tensor: predicted joints in 3D (#sample x 21 x 3)
"""
predicted_3D_coords = []
for i in tqdm(range(len(joints))):
predicted_3D_coords.append(
convert_2_5D_to_3D(
joints[i].to(torch.device("cpu")),
scales[i].to(torch.device("cpu")),
camera_params[i].to(torch.device("cpu")),
)
)
return torch.stack(predicted_3D_coords, axis=0)
def get_predictions_and_ground_truth(
model: LightningModule, data: Data_Set, **dataloader_args
) -> dict:
"""calculates the predictions by providing the model input image. Also prepares
the necessary transformations required for calculating the statistucs.
Args:
model (LightningModule): A model defined using pytorch lightening.
data (Data_Set): the data for which the model should be evaluated.
**dataloader_args: Arguments for torch.utils.data.DataLoader.
Adjust num_workers and batch_size for speed.
Returns:
dict: dict with lists of predictions and ground truth. Following keys are
present.
"predictions","ground_truth","ground_truth_3d",
"ground_truth_recreated_3d","predictions_3d","camera_param" and "scale"
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
model.to(device)
data_loader = DataLoader(data, **dataloader_args)
# initilaize the lists
predictions = []
ground_truth = []
ground_truth_3d = []
ground_truth_recreated_3d = []
scale = []
joints_raw = []
camera_param = []
z_root_calc_denoised = []
validitiy_flags = []
with torch.no_grad():
for i, batch in tqdm(enumerate(data_loader)):
input_tensor = batch["image"].to(device)
# batch["joints_valid"]
validitiy_flags.append(batch["joints_valid"].view(-1, 21).to(device))
ground_truth.append(batch["joints"])
ground_truth_3d.append(batch["joints3D"])
joints_raw.append(batch["joints_raw"])
ground_truth_recreated_3d.append(batch["joints3D_recreated"])
scale.append(batch["scale"].to(device))
camera_param.append(batch["K"].to(device))
predictions.append(model(input_tensor))
if hasattr(model, "denoiser"):
z_root_calc_denoised.append(
model.get_denoised_z_root_calc(predictions[-1], camera_param[-1])
)
predictions = torch.cat(predictions, axis=0)
scale = torch.cat(scale, axis=0)
camera_param = torch.cat(camera_param, axis=0)
predictions_3d = convert_2_5D_to_3D(predictions, scale, camera_param, True)
validitiy_flags = torch.cat(validitiy_flags, axis=0)
if hasattr(model, "denoiser"):
z_root_calc_denoised = torch.cat(z_root_calc_denoised, axis=0)
predictions_3d_denoised = convert_2_5D_to_3D(
predictions, scale, camera_param, True, z_root_calc_denoised
)
denoised_pred = {"predictions_3d_denoised": predictions_3d_denoised}
else:
denoised_pred = {}
# predictions_3d = calculate_predicted_3D(predictions, camera_param, scale)
ground_truth = torch.cat(ground_truth, axis=0)
ground_truth_3d = torch.cat(ground_truth_3d, axis=0)
ground_truth_recreated_3d = torch.cat(ground_truth_recreated_3d, axis=0)
joints_raw = torch.cat(joints_raw, axis=0)
return {
**{
"predictions": predictions,
"ground_truth": ground_truth,
"ground_truth_3d": ground_truth_3d,
"ground_truth_recreated_3d": ground_truth_recreated_3d,
"predictions_3d": predictions_3d,
"camera_param": camera_param,
"scale": scale,
"joints_raw": joints_raw,
"validitiy_flags": validitiy_flags,
},
**denoised_pred,
}
def evaluate(
model: LightningModule,
data: Data_Set,
use_procrustes: bool = True,
**dataloader_args
) -> dict:
"""Computes the predictions and various statistics.
Args:
model (LightningModule): Trained model.
data (Data_Set): data set for evaluation.
Returns:
dict: dictionary containing evaluation
"""
prediction_dict = get_predictions_and_ground_truth(model, data, **dataloader_args)
epe_2D = calculate_epe_statistics(
prediction_dict["predictions"], prediction_dict["ground_truth"], dim=2
)
epe_3D = calculate_epe_statistics(
prediction_dict["predictions_3d"], prediction_dict["ground_truth_3d"], dim=3
)
procrustes_results = (
get_procrustes_statistics(prediction_dict) if use_procrustes else {}
)
# epe_3D_recreated = calculate_epe_statistics(
# prediction_dict["predictions_3d"],
# prediction_dict["ground_truth_recreated_3d"],
# dim=3,
# )
epe_3D_gt_vs_3D_recreated = calculate_epe_statistics(
prediction_dict["ground_truth_3d"],
prediction_dict["ground_truth_recreated_3d"],
dim=3,
)
if hasattr(model, "denoiser"):
epe_3D_gt_vs_denoised = calculate_epe_statistics(
prediction_dict["ground_truth_3d"],
prediction_dict["predictions_3d_denoised"],
dim=3,
)
auc_denoised = np.mean(cal_auc_joints(epe_3D_gt_vs_denoised["eucledian_dist"]))
denoised_results = {
"Mean_EPE_3D_denoised": epe_3D_gt_vs_denoised["mean"].cpu(),
"Median_EPE_3D_denoised": epe_3D_gt_vs_denoised["median"].cpu(),
"auc_denoised": auc_denoised,
}
else:
denoised_results = {}
# y,x = get_pck_curves(epe_3D['eucledian_dist'])
auc = np.mean(cal_auc_joints(epe_3D["eucledian_dist"]))
return {
**{
"Mean_EPE_2D": epe_2D["mean"].cpu(),
"Median_EPE_2D": epe_2D["median"].cpu(),
"Mean_EPE_3D": epe_3D["mean"].cpu(),
"Median_EPE_3D": epe_3D["median"].cpu(),
"Median_EPE_3D_R_V_3D": epe_3D_gt_vs_3D_recreated["median"].cpu(),
"AUC": auc,
},
**denoised_results,
**procrustes_results,
}
def get_pck_curves(
eucledian_dist: torch.Tensor,
threshold_min: float = 0.0,
threshold_max: float = 0.5,
step: float = 0.005,
per_joint: bool = False,
) -> Tuple[np.array, np.array]:
"""Calculates pck curve i.e. percentage of predicted keypoints under a certain
threshold of eucledian distance from the ground truth. The number of thresholds this
is calculated depends upon threshold_max, threshold_min and step.
Args:
eucledian_dist (torch.Tensor): Euclidean distance between ground truth and
predictions. (#samples x 21 x 3)
threshold_min (float, optional): Minimum threshold that should be tested.
Defaults to 0.0.
threshold_max (float, optional): Maximum threshold to be tested. Defaults to 0.5.
step (float, optional): Defaults to 0.005.
per_joint (bool, optional): If True, calculates the curve separately for each of the 21 joints.
Defaults to False.
Returns:
Tuple[np.array, np.array]: Returns pck curve (#num_of_thresholds) or
(21 x #num_of_thresholds) and corresponding thresholds (#num_of_thresholds).
"""
thresholds = np.arange(threshold_min, threshold_max, step)
if per_joint:
percent_under_threshold = np.array(
[
torch.mean((eucledian_dist < theta) * 1.0, axis=0).cpu().numpy().T
for theta in thresholds
]
).T
else:
percent_under_threshold = np.array(
[
torch.mean((eucledian_dist < theta) * 1.0).cpu().numpy()
for theta in thresholds
]
)
return percent_under_threshold, thresholds
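# With the defaults this evaluates 100 thresholds (np.arange(0.0, 0.5, 0.005)); plotting
# percent_under_threshold against thresholds gives the PCK curve.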
def cal_auc_joints(
eucledian_dist: torch.Tensor, per_joint=True
) -> Union[np.array, float]:
"""Calculates Area Under the Curve (AUC) for pck curve of the eucledian distance between
predictions and ground truth.
Args:
eucledian_dist (torch.Tensor): Euclidean distance between ground truth and
predictions. (#samples x 21 x 3)
per_joint (bool, optional): If True, calculates the AUC separately for each of the 21 joints.
Defaults to True.
Returns:
Union[np.array, float]: Either return AUC per joint or overall AUC.
"""
percent_index_threshold, thresholds = get_pck_curves(
eucledian_dist, threshold_min=0.0, threshold_max=0.5, step=0.005, per_joint=True
)
normalizing_factor = np.trapz(y=np.ones(len(thresholds)), x=thresholds)
auc_per_joint = np.array(
[
np.trapz(y=percent_index_threshold[i], x=thresholds) / normalizing_factor
for i in range(21)
]
)
if per_joint:
return auc_per_joint
else:
return np.mean(auc_per_joint)
def calc_procrustes_transform(
X: Tensor, Y: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Calculates procrustes transform of point clouds in batch format.
minimize ||scale x rot_mat x Y +t -X||_F with scale, rot_mat and translation
code adapted from : http://stackoverflow.com/a/18927641/1884420
Args:
X (Tensor): batch x n x p
Y (Tensor): batch x n x k
Note: For joints n =21 and k=p=3
Returns:
y_transform (Tensor): transformed Y to best match X
rot_mat (Tensor): Rotation matrix
scale (Tensor): Scale
translation (Tensor): Translation
"""
if torch.all(X == 0):
print("X contains only NaNs. Not computing PMSE.")
return (Y,) + (torch.tensor([]),) * 3  # keep the 4-tuple shape expected by callers
if torch.all(Y == 0):
print("Y contains only NaNs. Not computing PMSE.")
return (Y,) + (torch.tensor([]),) * 3  # keep the 4-tuple shape expected by callers
with torch.no_grad():
muX = X.mean(dim=1, keepdim=True)
muY = Y.mean(dim=1, keepdim=True)
# Centering and scale normalizing.
X0 = X - muX
Y0 = Y - muY
normX = torch.linalg.norm(X0, dim=[1, 2], ord="fro", keepdim=True)
normY = torch.linalg.norm(Y0, dim=[1, 2], ord="fro", keepdim=True)
# Scale to equal (unit) norm
X0 = X0 / normX
Y0 = Y0 / normY
# Compute optimum rotation matrix of Y
A = torch.bmm(X0.transpose(2, 1), Y0)
U, s, V = torch.svd(A)
rot_mat = torch.bmm(V, U.transpose(2, 1))
# Make sure we have a rotation
det_rot_mat = torch.det(rot_mat)
V[:, :, -1] *= torch.sign(det_rot_mat).view(-1, 1)
s[:, -1] *= torch.sign(det_rot_mat)
rot_mat = torch.matmul(V, U.transpose(2, 1))
scale_ratio = s.sum(dim=1).view(-1, 1, 1)
scale = scale_ratio * normX / normY
translation = muX - scale * torch.matmul(muY, rot_mat)
# y_transform = normX * scale_ratio * torch.matmul(Y0, rot_mat) + muX
y_transform = scale * torch.matmul(Y, rot_mat) + translation
return y_transform, rot_mat, scale, translation
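# Minimal sanity-check sketch for calc_procrustes_transform (illustrative only, not part
# of the original module): build Y as a known similarity transform of X and verify that
# the aligned output matches X up to numerical tolerance.
def _procrustes_transform_sanity_check(batch: int = 2, n_points: int = 21) -> None:
    X = torch.rand(batch, n_points, 3)
    # A proper rotation about the z-axis (0.8**2 + 0.6**2 == 1, det == 1).
    rot = torch.tensor([[0.8, -0.6, 0.0], [0.6, 0.8, 0.0], [0.0, 0.0, 1.0]])
    Y = 0.5 * torch.matmul(X, rot) + 0.1  # scaled, rotated and translated copy of X
    y_transform, _, _, _ = calc_procrustes_transform(X, Y)
    assert torch.allclose(y_transform, X, atol=1e-4)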
def calc_procrustes_transform2(
X_: Tensor, Y_: Tensor, valid_flag
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Calculates procrustes transform of point clouds in batch format.
minimize ||scale x rot_mat x Y +t -X||_F with scale, rot_mat and translation
code adapted from : http://stackoverflow.com/a/18927641/1884420
Args:
X (Tensor): batch x n x p
Y (Tensor): batch x n x k
Note: For joints n =21 and k=p=3
Returns:
y_transform (Tensor): transformed Y to best match X
rot_mat (Tensor): Rotation matrix
scale (Tensor): Scale
translation (Tensor): Translation
"""
if torch.all(X_ == 0):
print("X contains only NaNs. Not computing PMSE.")
return (Y_,) + (torch.tensor([]),) * 3  # keep the 4-tuple shape expected by callers
if torch.all(Y_ == 0):
print("Y contains only NaNs. Not computing PMSE.")
return (Y_,) + (torch.tensor([]),) * 3  # keep the 4-tuple shape expected by callers
scales, rot_mats, translations, y_transform = [], [], [], []
for sample in tqdm(range(len(X_))):
X = X_[sample][valid_flag[sample]].view(1, -1, 3)
Y = Y_[sample][valid_flag[sample]].view(1, -1, 3)
with torch.no_grad():
muX = X.mean(dim=1, keepdim=True)
muY = Y.mean(dim=1, keepdim=True)
# Centering and scale normalizing.
X0 = X - muX
Y0 = Y - muY
normX = torch.linalg.norm(X0, dim=[1, 2], ord="fro", keepdim=True)
normY = torch.linalg.norm(Y0, dim=[1, 2], ord="fro", keepdim=True)
# Scale to equal (unit) norm
X0 = X0 / normX
Y0 = Y0 / normY
# Compute optimum rotation matrix of Y
A = torch.bmm(X0.transpose(2, 1), Y0)
U, s, V = torch.svd(A)
rot_mat = torch.bmm(V, U.transpose(2, 1))
# Make sure we have a rotation
det_rot_mat = torch.det(rot_mat)
V[:, :, -1] *= torch.sign(det_rot_mat).view(-1, 1)
s[:, -1] *= torch.sign(det_rot_mat)
rot_mat = torch.matmul(V, U.transpose(2, 1))
scale_ratio = s.sum(dim=1).view(-1, 1, 1)
scale = scale_ratio * normX / normY
translation = muX - scale * torch.matmul(muY, rot_mat)
scales.append(scale)
rot_mats.append(rot_mat)
translations.append(translation)
y_transform.append(scale * torch.matmul(Y_[sample], rot_mat) + translation)
y_transform = torch.cat(y_transform, dim=0)
return y_transform, rot_mats, scales, translations
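# A hedged sketch of a call: the fit uses only the joints flagged valid for each
# sample, but the resulting transform is applied to all joints of that sample.
# Shapes below assume 21 joints in 3-D.
#
#     X = torch.randn(4, 21, 3)          # reference joints
#     Y = torch.randn(4, 21, 3)          # joints to align
#     valid = torch.rand(4, 21) > 0.2    # boolean visibility mask
#     Y_aligned, rot_mats, scales, translations = calc_procrustes_transform2(X, Y, valid)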
def get_procrustes_statistics(
pred: Dict[str, Tensor], use_visibitiy=False
) -> Dict[str, Tensor]:
device = pred["predictions"].device
if use_visibitiy:
pred_3d_t, _, _, _ = calc_procrustes_transform2(
pred["joints_raw"].to(device),
pred["predictions_3d"],
pred["validitiy_flags"],
)
else:
pred_3d_t, _, _, _ = calc_procrustes_transform(
pred["joints_raw"].to(device), pred["predictions_3d"]
)
epe_3D_t = calculate_epe_statistics(
pred_3d_t, pred["joints_raw"], dim=3, validitiy_flags=pred["validitiy_flags"]
)
auc_t = np.mean(cal_auc_joints(epe_3D_t["eucledian_dist"]))
procrustes_results = {
"Mean_EPE_3D_procrustes": epe_3D_t["mean"].cpu(),
"Median_EPE_3D_procrustes": epe_3D_t["median"].cpu(),
"auc_procrustes": auc_t,
}
if "predictions_3d_denoised" in pred.keys():
pred_3d_t_denoised, _, _, _ = calc_procrustes_transform(
pred["joints_raw"].to(device), pred["predictions_3d_denoised"]
)
epe_3D_denoised_t = calculate_epe_statistics(
pred_3d_t_denoised, pred["joints_raw"], dim=3
)
auc_denoised_t = np.mean(cal_auc_joints(epe_3D_denoised_t["eucledian_dist"]))
procrustes_results = {
**procrustes_results,
**{
"Mean_EPE_3D_denoised_procrustes": epe_3D_denoised_t["mean"].cpu(),
"Median_EPE_3D_denoised_procrustes": epe_3D_denoised_t["median"].cpu(),
"auc_denoised_procrustes": auc_denoised_t,
},
}
return procrustes_results
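# A hedged sketch of the dictionary shape this helper expects (key names are taken
# from the accesses above; the tensor shapes are assumptions for a 21-joint setup):
#
#     pred = {
#         "predictions": <any tensor>,            # only used to pick the device
#         "predictions_3d": FloatTensor[B, 21, 3],
#         "joints_raw": FloatTensor[B, 21, 3],
#         "validitiy_flags": BoolTensor[B, 21],
#         # optional: "predictions_3d_denoised": FloatTensor[B, 21, 3]
#     }
#     stats = get_procrustes_statistics(pred)     # Mean/Median EPE and AUC after alignment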
# def get_procrustes_statistics2(pred: Dict[str, Tensor]) -> Dict[str, Tensor]:
# device = pred["predictions"].device
# pred_3d_t, _, _, _ = calc_procrustes_transform(
# pred["joints_raw"].to(device), pred["predictions_3d"]
# )
# epe_3D_t = calculate_epe_statistics(pred_3d_t, pred["joints_raw"], dim=3)
# auc_t = np.mean(cal_auc_joints(epe_3D_t["eucledian_dist"]))
# procrustes_results = {
# "Mean_EPE_3D_procrustes": epe_3D_t["mean"].cpu(),
# "Median_EPE_3D_procrustes": epe_3D_t["median"].cpu(),
# "auc_procrustes": auc_t,
# }
# if "predictions_3d_denoised" in pred.keys():
# pred_3d_t_denoised, _, _, _ = calc_procrustes_transform(
# pred["joints_raw"].to(device), pred["predictions_3d_denoised"]
# )
# epe_3D_denoised_t = calculate_epe_statistics(
# pred_3d_t_denoised, pred["joints_raw"], dim=3
# )
# auc_denoised_t = np.mean(cal_auc_joints(epe_3D_denoised_t["eucledian_dist"]))
# procrustes_results = {
# **procrustes_results,
# **{
# "Mean_EPE_3D_denoised_procrustes": epe_3D_denoised_t["mean"].cpu(),
# "Median_EPE_3D_denoised_procrustes": epe_3D_denoised_t["median"].cpu(),
# "auc_denoised_procrustes": auc_denoised_t,
# },
# }
# return procrustes_results
|
the-stack_106_30378 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
# from domainbed.lib import misc
def _hparams(algorithm, dataset, random_seed):
"""
Global registry of hyperparams. Each entry is a (default, random) tuple.
New algorithms / networks / etc. should add entries here.
"""
SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']
hparams = {}
def _hparam(name, default_val, random_val_fn):
"""Define a hyperparameter. random_val_fn takes a RandomState and
returns a random hyperparameter value."""
assert(name not in hparams)
# random_state = np.random.RandomState(
# misc.seed_hash(random_seed, name)
# )
# hparams[name] = (default_val, random_val_fn(random_state))
hparams[name] = (default_val, default_val)
# Unconditional hparam definitions.
_hparam('data_augmentation', True, lambda r: True)
_hparam('resnet18', True, lambda r: False) # _hparam('resnet18', False, lambda r: False)
_hparam('resnet_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))
_hparam('class_balanced', False, lambda r: False)
# TODO: nonlinear classifiers disabled
_hparam('nonlinear_classifier', False,
lambda r: bool(r.choice([False, False])))
# Algorithm-specific hparam definitions. Each block of code below
# corresponds to exactly one algorithm.
if algorithm in ['DANN', 'CDANN']:
_hparam('lambda', 1.0, lambda r: 10**r.uniform(-2, 2))
_hparam('weight_decay_d', 0., lambda r: 10**r.uniform(-6, -2))
_hparam('d_steps_per_g_step', 1, lambda r: int(2**r.uniform(0, 3)))
_hparam('grad_penalty', 0., lambda r: 10**r.uniform(-2, 1))
_hparam('beta1', 0.5, lambda r: r.choice([0., 0.5]))
_hparam('mlp_width', 256, lambda r: int(2 ** r.uniform(6, 10)))
_hparam('mlp_depth', 3, lambda r: int(r.choice([3, 4, 5])))
_hparam('mlp_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))
elif algorithm == 'Fish':
_hparam('meta_lr', 0.5, lambda r:r.choice([0.05, 0.1, 0.5]))
elif algorithm == "RSC":
_hparam('rsc_f_drop_factor', 1/3, lambda r: r.uniform(0, 0.5))
_hparam('rsc_b_drop_factor', 1/3, lambda r: r.uniform(0, 0.5))
elif algorithm == "SagNet":
_hparam('sag_w_adv', 0.1, lambda r: 10**r.uniform(-2, 1))
elif algorithm == "IRM":
_hparam('irm_lambda', 1e2, lambda r: 10**r.uniform(-1, 5))
_hparam('irm_penalty_anneal_iters', 500,
lambda r: int(10**r.uniform(0, 4)))
elif algorithm == "Mixup":
_hparam('mixup_alpha', 0.2, lambda r: 10**r.uniform(-1, -1))
elif algorithm == "GroupDRO":
_hparam('groupdro_eta', 1e-2, lambda r: 10**r.uniform(-3, -1))
elif algorithm == "MMD" or algorithm == "CORAL":
_hparam('mmd_gamma', 1., lambda r: 10**r.uniform(-1, 1))
elif algorithm == "MLDG":
_hparam('mldg_beta', 1., lambda r: 10**r.uniform(-1, 1))
elif algorithm == "MTL":
_hparam('mtl_ema', .99, lambda r: r.choice([0.5, 0.9, 0.99, 1.]))
elif algorithm == "VREx":
_hparam('vrex_lambda', 1e1, lambda r: 10**r.uniform(-1, 5))
_hparam('vrex_penalty_anneal_iters', 500,
lambda r: int(10**r.uniform(0, 4)))
elif algorithm == "SD":
_hparam('sd_reg', 0.1, lambda r: 10**r.uniform(-5, -1))
elif algorithm == "ANDMask":
_hparam('tau', 1, lambda r: r.uniform(0.5, 1.))
elif algorithm == "IGA":
_hparam('penalty', 1000, lambda r: 10**r.uniform(1, 5))
# Dataset-and-algorithm-specific hparam definitions. Each block of code
# below corresponds to exactly one hparam. Avoid nested conditionals.
if dataset in SMALL_IMAGES:
_hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
else:
_hparam('lr', 5e-5, lambda r: 10**r.uniform(-5, -3.5))
if dataset in SMALL_IMAGES:
_hparam('weight_decay', 0., lambda r: 0.)
else:
_hparam('weight_decay', 0., lambda r: 10**r.uniform(-6, -2))
if dataset in SMALL_IMAGES:
_hparam('batch_size', 64, lambda r: int(2**r.uniform(3, 9)))
elif algorithm == 'ARM':
_hparam('batch_size', 8, lambda r: 8)
elif dataset == 'DomainNet':
_hparam('batch_size', 32, lambda r: int(2**r.uniform(3, 5)))
else:
_hparam('batch_size', 32, lambda r: int(2**r.uniform(3, 5.5)))
if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES:
_hparam('lr_g', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
elif algorithm in ['DANN', 'CDANN']:
_hparam('lr_g', 5e-5, lambda r: 10**r.uniform(-5, -3.5))
if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES:
_hparam('lr_d', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
elif algorithm in ['DANN', 'CDANN']:
_hparam('lr_d', 5e-5, lambda r: 10**r.uniform(-5, -3.5))
if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES:
_hparam('weight_decay_g', 0., lambda r: 0.)
elif algorithm in ['DANN', 'CDANN']:
_hparam('weight_decay_g', 0., lambda r: 10**r.uniform(-6, -2))
return hparams
def default_hparams(algorithm, dataset):
return {a: b for a, (b, c) in _hparams(algorithm, dataset, 0).items()}
def random_hparams(algorithm, dataset, seed):
return {a: c for a, (b, c) in _hparams(algorithm, dataset, seed).items()}
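# A minimal usage sketch; 'IRM' and 'RotatedMNIST' are simply examples of an
# algorithm / dataset pair handled by the registry above.
if __name__ == '__main__':
    hp = default_hparams('IRM', 'RotatedMNIST')
    # Small-image dataset defaults: lr=1e-3, batch_size=64; IRM adds irm_lambda=100.0.
    print(hp['lr'], hp['batch_size'], hp['irm_lambda'])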
|
the-stack_106_30380 | ### Made by Joshua ###
import pygame,time
from random import randint,choice
#import sys
pygame.init()
#log = open('Log.txt','w')
#sys.stdout = log
##CONSTANTS
##Color Constants
# R G B
PURPLE = ( 48, 10, 36)
GREEN = (000,255,000)
COMBLUE = (212,222,255)
ORANGE = (200, 41, 83)
GRAY = ( 50, 50, 60)
CYAN     = (255, 10,255)  # note: despite the name, this RGB value renders as magenta/pink
PINK = (119, 52, 90)
RED = (255, 0, 0)
##Game Constants
GAMETITLE = 'Invaders'
DISPLAYWIDTH = 800
DISPLAYHEIGHT = 600
BGCOLOR = PURPLE
X_MARGIN = 30
Y_MARGIN = 30
FPS = 60
##Player Constants
PLAYERWIDTH = 50
PLAYERHEIGHT = 7
PLAYERSPEED = 7
PLAYERCOLOR = GREEN
PLAYERNAME = 'Player'
##Bullet Constants
BULLETWIDTH = 5
BULLETHEIGHT = 5
BULLETCOLOR = CYAN
BULLETSPEED = 15
##Alien Constants
ALIENHEIGHT = 25
ALIENWIDTH = 25
ALIENSPEED = 8
ALIENROW = 10
ALIENCOLOUMN = 3
ALIENGAP_Y = ALIENHEIGHT + 45
ALIENGAP_X = ALIENWIDTH + 45
ALIENNAME = 'Alien'
ALIENTYPE = ['Blue','White','Green']
##Blocker Constants
BLOCKERHEIGHT = 10
BLOCKERWIDTH = 10
BLOCKERCOLOR = GREEN
BLOCKERGAP = 10
##Initialize Game
Display = pygame.display.set_mode((DISPLAYWIDTH,DISPLAYHEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption(GAMETITLE)
icon = pygame.image.load('images/enemy1_1.png')
pygame.display.set_icon(icon)
Display.fill(BGCOLOR)
lasersound = pygame.mixer.Sound('laser.ogg')
smallfont = pygame.font.SysFont("orena", 20)
medfont = pygame.font.SysFont("orena", 25)
largefont = pygame.font.SysFont("orena", 30)
numfontsmall = pygame.font.SysFont("Space Invaders",35)
numfontmedium = pygame.font.SysFont('Space Invaders',40)
class Player:
def __init__(self,posx,posy,color,size=(PLAYERHEIGHT,PLAYERWIDTH)):
self.alive = True
self.x = posx
self.y = posy
self.color = color
self.width = size[0]
self.height = size[1]
def render(self):
pygame.draw.rect(Display,self.color,[self.x,self.y,self.height,self.width])
def isalive(self):
return self.alive
class Bullet:
def __init__(self,posx,posy,color,speed=BULLETSPEED,size=(BULLETWIDTH,BULLETHEIGHT)):
self.x = posx
self.y = posy
self.color = color
self.width = size[0]
self.height = size[1]
self.speed = speed
def render(self):
pygame.draw.rect(Display,self.color,[self.x,self.y,self.height,self.width])
class Alien:
def __init__(self,posx,posy,image_no,alientype,size=(ALIENWIDTH,ALIENHEIGHT)):
self.x = posx
self.y = posy
self.width = size[0]
self.height = size[1]
self.image = []
self.type = alientype
if image_no == 0:
self.image.append(pygame.image.load('images/enemy1_1.png'))
self.image.append(pygame.image.load('images/enemy1_2.png'))
elif image_no == 1:
self.image.append(pygame.image.load('images/enemy2_1.png'))
self.image.append(pygame.image.load('images/enemy2_2.png'))
elif image_no == 2:
self.image.append(pygame.image.load('images/enemy3_1.png'))
self.image.append(pygame.image.load('images/enemy3_2.png'))
else:
self.image.append(pygame.image.load('images/enemy2_1.png'))
self.image.append(pygame.image.load('images/enemy2_2.png'))
self.image[0].convert_alpha()
self.image[1].convert_alpha()
self.image[0] = pygame.transform.scale(self.image[0],(self.width,self.height))
self.image[1] = pygame.transform.scale(self.image[1],(self.width,self.height))
def render(self,img=0):
#pygame.draw.rect(Display,PLAYERCOLOR,[self.x,self.y,self.height,self.width],2)
Display.blit(self.image[img],(self.x, self.y))
def intersect(s1_x, s1_y, s2_x, s2_y, height, width):
    # Axis-aligned bounding-box overlap test between two sprites.
    return (s2_x - width < s1_x < s2_x + width) and (s2_y - height < s1_y < s2_y + height)
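# A quick hedged illustration of the overlap test with made-up coordinates
# (ALIENWIDTH and ALIENHEIGHT are both 25):
#
#     intersect(100, 100, 110, 110, ALIENHEIGHT, ALIENWIDTH)  # True: 10 px apart on each axis
#     intersect(100, 100, 200, 100, ALIENHEIGHT, ALIENWIDTH)  # False: 100 px apart on x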
def text_objects(text,color,size):
if size == "small":
textSurface = smallfont.render(text, True, color)
elif size == "medium":
textSurface = medfont.render(text, True, color)
elif size == "large":
textSurface = largefont.render(text, True, color)
return textSurface, textSurface.get_rect()
def message_to_screen(msg,color, y_displace=0, size = "small"):
textSurf, textRect = text_objects(msg,color, size)
textRect.center = (DISPLAYWIDTH / 2), (DISPLAYHEIGHT / 2)+y_displace
Display.blit(textSurf, textRect)
def score(msg,y_displace):
text = msg
textSurface = numfontsmall.render(text,True,GREEN)
textRect = textSurface.get_rect()
textRect.center = (DISPLAYWIDTH / 2), (DISPLAYHEIGHT / 2)+y_displace
Display.blit(textSurface, textRect)
def live_score(pts):
    # Render the running score in the top-left corner of the display.
    text = '{}'.format(pts)
    textSurface = numfontsmall.render(text, True, GREEN)
    Display.blit(textSurface, (10, 10))
def pause():
paused = True
while paused:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
paused = False
break
Display.fill(BGCOLOR)
message_to_screen('Paused',GRAY,-40,'large')
message_to_screen('Press P to play',GREEN,10,'medium')
pygame.display.update()
def gameintro():
intro = True
global gameover
bullets = []
while intro:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
gameover = False
intro = False
break
if event.key == pygame.K_q:
pygame.quit()
quit()
'''
posx = randint(10,DISPLAYWIDTH)
bullets.append(Bullet(posx,0,PINK))
i = 0
while i < len(bullets):
if bullets[i].y > DISPLAYHEIGHT:
del(bullets[i])
i += 1
for bullet in bullets:
bullet.y += 2
'''
Display.fill(BGCOLOR)
#for bullet in bullets:
#bullet.render()
message_to_screen("Welcome to Invaders",GREEN,-50,'large')
message_to_screen("Press P to Play or Q to quit",COMBLUE,0,"small")
pygame.display.update()
def gameover_screen(time_player,alien_win,player_win):
outro = True
start = time.time()
while outro:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
outro = False
break
if event.key == pygame.K_q:
pygame.quit()
quit()
Display.fill(BGCOLOR)
message_to_screen("Game Over",PINK,-100,"medium")
wonmessage = '{} won'.format(win)
message_to_screen(wonmessage,COMBLUE,-50,"small")
msg = '{}: {} {}: {}'.format(ALIENNAME,alien_win,PLAYERNAME,player_win)
score(msg,0)
pygame.display.update()
end = time.time()
if (end - start) > 2:
outro = False
break
gameexit = False
alien_win = 0
player_win = 0
highscore = 0
while not gameexit:
##Initialize Player
player_x = (DISPLAYWIDTH-X_MARGIN)/2
player_y = (DISPLAYHEIGHT-Y_MARGIN)
player = Player(player_x, player_y, PLAYERCOLOR)
player_x_change = 0
player_life = 5
##Initialize Alien
alien = []
alienpox = X_MARGIN
alienpoy = Y_MARGIN
x_offset = 35
AlienChange_y = 0
for j in range(ALIENCOLOUMN):
for i in range(ALIENROW):
alien.append(Alien(alienpox + i*ALIENGAP_X + x_offset, alienpoy,j,ALIENTYPE[j]))
alienpoy += ALIENGAP_Y
#x_offset += ALIENWIDTH + 10
##Initialize Bullet
bullet = []
#background = []
alienbullet = []
gameover = True
win = ALIENNAME
current_time = time.time()
last_moved_time = time.time()
gamestarttime = time.time()
lastfiredalien = time.time()
points = 0
gameintro()
while not gameover:
##Get Event
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_x_change += -PLAYERSPEED
if event.key == pygame.K_RIGHT:
player_x_change += PLAYERSPEED
if event.key == pygame.K_SPACE:
#lasersound.play()
bullet.append(Bullet(player.x + 15,player.y,BULLETCOLOR))
if event.key == pygame.K_p:
pause()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
player_x_change = 0
if event.key == pygame.K_RIGHT:
player_x_change = 0
current_time = time.time()
if len(alien) == 0:
win = PLAYERNAME
gameover = True
break
#Set Alien Speed(Orientation)
if (alien[len(alien)-1].x > DISPLAYWIDTH - X_MARGIN - 50):
ALIENSPEED = -ALIENSPEED
AlienChange_y = ALIENHEIGHT
elif alien[0].x < X_MARGIN:
ALIENSPEED = -ALIENSPEED
AlienChange_y = ALIENHEIGHT
#Set Bullet Position
for bullets in bullet:
bullets.y += -BULLETSPEED
itter = 0
while itter < len(bullet):
if bullet[itter].y < 0:
del(bullet[itter])
itter += 1
### ALIEN BULLET AI ###
if len(alien) != 0:
alienb_pos = alien.index(choice(alien))
if current_time - lastfiredalien > 2:
#print(alienb_pos)
#print(len(alienbullet))
lastfiredalien = time.time()
alienbullet.append(Bullet(alien[alienb_pos].x,alien[alienb_pos].y,ORANGE,8))
#Check Bullet intersection with player
for abullet in alienbullet:
#print('Bullet intersect loop')
if intersect(abullet.x,abullet.y,player.x,player.y,PLAYERHEIGHT,PLAYERWIDTH):
player_life -= 1
for abullets in alienbullet:
#print('Bullet speed set loop')
abullets.y += abullet.speed
itter = 0
while itter < len(alienbullet):
#print('Bullet delete loop')
if alienbullet[itter].y > DISPLAYHEIGHT:
del(alienbullet[itter])
itter += 1
### END ALIEN BULLET AI ###
#Check Bullet Intersection with Alien
k,i = 0,0
hit_bullet = []
hit_alien = []
for bullets in bullet:
i = 0
for aliens in alien:
if intersect(bullets.x,bullets.y,aliens.x,aliens.y,ALIENHEIGHT,ALIENWIDTH):
hit_alien.append(i)
hit_bullet.append(k)
i += 1
k += 1
hit_alien = list(set(hit_alien))
hit_bullet = list(set(hit_bullet))
for i in hit_bullet:
del(bullet[i])
for i in hit_alien:
typ = alien[i].type
if typ == 'Blue':
points += 150
elif typ == 'White':
points += 100
else:
points += 50
del(alien[i])
print(points)
#Set Player Position
player_x += player_x_change
player.x = player_x
time_difference = current_time - last_moved_time
#Set Alien Position
if time_difference > 0.5:
last_moved_time = time.time()
for aliens in alien:
aliens.image[0],aliens.image[1] = aliens.image[1],aliens.image[0]
aliens.x += ALIENSPEED
aliens.y += AlienChange_y
for aliens in alien:
if aliens.y >= player.y:
win = ALIENNAME
gameover = True
break
AlienChange_y = 0
if player_life == 0:
win = ALIENNAME
gameover = True
if player_life == 1:
player.color = RED
elif player_life == 3:
player.color = ORANGE
'''
posx = randint(10,DISPLAYWIDTH)
background.append(Bullet(posx,0,PINK))
i = 0
while i < len(background):
if background[i].y > DISPLAYHEIGHT-20:
del(background[i])
i += 1
for bullets in background:
bullets.y += 8
'''
##Render
Display.fill(BGCOLOR)
'''
for bullets in background:
bullets.render()
'''
player.render()
for abullet in alienbullet:
abullet.render()
for bullets in bullet:
bullets.render()
for aliens in alien:
aliens.render()
pygame.display.update()
clock.tick(FPS)
### End Game Loop
if win == 'Alien':
alien_win += 1
else:
player_win += 1
gameendtime = time.time()
totalgametime = gameendtime - gamestarttime
gameover_screen(totalgametime,alien_win,player_win)
|
the-stack_106_30382 | #!/usr/bin/env python3.4
####################################################################
# KPS_PlotPoly.pyw
# KPS
#
# Author: Kareem Omar
# [email protected]
# https://github.com/komrad36
#
# Last updated Feb 27, 2016
# This application is entirely my own work.
####################################################################
#
# Plots polygons of generated polygon file in 3-D to
# examine and verify correctness.
#
font_size = 21
maximize_plot = True
wireframe = False
poly_color = 'blue'
num_vtx = 4
face_alpha = 0.75
# maximize plots if desired, on any backend
def maximizePlot():
try:
mng = plt.get_current_fig_manager()
backend = plt.get_backend()
if backend == 'TkAgg':
try:
mng.window.state('zoomed')
except:
mng.resize(*mng.window.maxsize())
elif backend == 'wxAgg':
mng.frame.Maximize(True)
elif backend[:2].upper() == 'QT':
mng.window.showMaximized()
else:
return False
return True
except:
return False
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.mplot3d import Axes3D
import sys
if len(sys.argv) != 2:
print('\nUsage: KPS_PlotPoly.py <polygon_file>\n')
else:
name = sys.argv[1]
short_name = name[max(name.rfind('/'), name.rfind('\\')) + 1:]
try:
f = open(name)
    except OSError:
        print('Failed to open ' + name + '. Aborting.')
        sys.exit(1)
plt.rcParams['font.size'] = font_size
fig = plt.figure('KPS - ' + short_name)
ax = fig.gca(projection='3d', aspect='equal')
# load polygons
vtx = [[float(x) for x in line.rstrip('\n').split(',')] for line in f if len(line) > 4 and line[0] != '#']
x, y, z = list(map(list, zip(*vtx)))
poly = [vtx[i:i + num_vtx] for i in range(0, len(vtx), num_vtx)]
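    # The parsing above implies the expected file format: one comma-separated x,y,z
    # vertex per line, '#' lines treated as comments, and every num_vtx (= 4)
    # consecutive vertices forming one quad. A hypothetical minimal input:
    #
    #     # unit square in the z = 0 plane
    #     0.0, 0.0, 0.0
    #     1.0, 0.0, 0.0
    #     1.0, 1.0, 0.0
    #     0.0, 1.0, 0.0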
ax.set_xlabel(r'$x_{body} [m]$')
ax.set_ylabel(r'$y_{body} [m]$')
ax.set_zlabel(r'$z_{body} [m]$')
x_min = min(x)
x_max = max(x)
x_center = 0.5 * (x_max + x_min)
y_min = min(y)
y_max = max(y)
y_center = 0.5 * (y_max + y_min)
z_min = min(z)
z_max = max(z)
z_center = 0.5 * (z_max + z_min)
total_min = min([x_min, y_min, z_min])
total_max = max([x_max, y_max, z_max])
half_span = 0.5 * (total_max - total_min)
ax.set_xlim3d(x_center - half_span, x_center + half_span)
ax.set_ylim3d(y_center - half_span, y_center + half_span)
ax.set_zlim3d(z_center - half_span, z_center + half_span)
if wireframe:
for i in range(0, len(z), num_vtx):
ax.plot(x[i:i + num_vtx] + [x[i]], y[i:i + num_vtx] + [y[i]], z[i:i + num_vtx] + [z[i]], color=poly_color)
else:
ax.add_collection3d(Poly3DCollection(poly, alpha=face_alpha, edgecolor='k', color=poly_color))
ax.plot([x_min, x_max], [0, 0], [0, 0], color='k', alpha=0.35)
ax.plot([0, 0], [y_min, y_max], [0, 0], color='k', alpha=0.35)
ax.plot([0, 0], [0, 0], [z_min, z_max], color='k', alpha=0.35)
fig.tight_layout()
if maximize_plot:
maximizePlot()
fig.canvas.draw()
plt.show()
|
the-stack_106_30385 | # Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011,2012 Akira YOSHIYAMA <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This source code is based ./auth_token.py and ./ec2_token.py.
# See them for their copyright.
"""
-------------------
S3 Token Middleware
-------------------
s3token middleware is for authentication with s3api + keystone.
This middleware:
* Gets a request from the s3api middleware with an S3 Authorization
access key.
* Validates s3 token with Keystone.
* Transforms the account name to AUTH_%(tenant_name).
* Optionally can retrieve and cache secret from keystone
to validate signature locally
.. note::
If upgrading from swift3, the ``auth_version`` config option has been
removed, and the ``auth_uri`` option now includes the Keystone API
version. If you previously had a configuration like
.. code-block:: ini
[filter:s3token]
use = egg:swift3#s3token
auth_uri = https://keystonehost:35357
auth_version = 3
you should now use
.. code-block:: ini
[filter:s3token]
use = egg:swift#s3token
auth_uri = https://keystonehost:35357/v3
"""
import base64
import json
from keystoneclient.v3 import client as keystone_client
from keystoneauth1 import session as keystone_session
from keystoneauth1 import loading as keystone_loading
import requests
import six
from six.moves import urllib
from swift.common.swob import Request, HTTPBadRequest, HTTPUnauthorized, \
HTTPException
from swift.common.utils import config_true_value, split_path, get_logger, \
cache_from_env, append_underscore
from swift.common.wsgi import ConfigFileError
PROTOCOL_NAME = 'S3 Token Authentication'
# Headers to purge if they came from (or may have come from) the client
KEYSTONE_AUTH_HEADERS = (
'X-Identity-Status', 'X-Service-Identity-Status',
'X-Domain-Id', 'X-Service-Domain-Id',
'X-Domain-Name', 'X-Service-Domain-Name',
'X-Project-Id', 'X-Service-Project-Id',
'X-Project-Name', 'X-Service-Project-Name',
'X-Project-Domain-Id', 'X-Service-Project-Domain-Id',
'X-Project-Domain-Name', 'X-Service-Project-Domain-Name',
'X-User-Id', 'X-Service-User-Id',
'X-User-Name', 'X-Service-User-Name',
'X-User-Domain-Id', 'X-Service-User-Domain-Id',
'X-User-Domain-Name', 'X-Service-User-Domain-Name',
'X-Roles', 'X-Service-Roles',
'X-Is-Admin-Project',
'X-Service-Catalog',
# Deprecated headers, too...
'X-Tenant-Id',
'X-Tenant-Name',
'X-Tenant',
'X-User',
'X-Role',
)
def parse_v2_response(token):
access_info = token['access']
headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': ','.join(r['name']
for r in access_info['user']['roles']),
'X-User-Id': access_info['user']['id'],
'X-User-Name': access_info['user']['name'],
'X-Tenant-Id': access_info['token']['tenant']['id'],
'X-Tenant-Name': access_info['token']['tenant']['name'],
'X-Project-Id': access_info['token']['tenant']['id'],
'X-Project-Name': access_info['token']['tenant']['name'],
}
return headers, access_info['token']['tenant']
def parse_v3_response(token):
token = token['token']
headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': ','.join(r['name']
for r in token['roles']),
'X-User-Id': token['user']['id'],
'X-User-Name': token['user']['name'],
'X-User-Domain-Id': token['user']['domain']['id'],
'X-User-Domain-Name': token['user']['domain']['name'],
'X-Tenant-Id': token['project']['id'],
'X-Tenant-Name': token['project']['name'],
'X-Project-Id': token['project']['id'],
'X-Project-Name': token['project']['name'],
'X-Project-Domain-Id': token['project']['domain']['id'],
'X-Project-Domain-Name': token['project']['domain']['name'],
}
return headers, token['project']
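# A minimal, hedged sketch of the Keystone v3 payload shape these parsers expect
# (field values are placeholders, not a full schema):
#
#     token = {'token': {
#         'user': {'id': 'u1', 'name': 'alice',
#                  'domain': {'id': 'default', 'name': 'Default'}},
#         'project': {'id': 'p1', 'name': 'demo',
#                     'domain': {'id': 'default', 'name': 'Default'}},
#         'roles': [{'name': 'member'}],
#     }}
#     headers, project = parse_v3_response(token)
#     # headers['X-Project-Name'] == 'demo'; project is token['token']['project']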
class S3Token(object):
"""Middleware that handles S3 authentication."""
def __init__(self, app, conf):
"""Common initialization code."""
self._app = app
self._logger = get_logger(
conf, log_route=conf.get('log_name', 's3token'))
self._logger.debug('Starting the %s component', PROTOCOL_NAME)
self._timeout = float(conf.get('http_timeout', '10.0'))
if not (0 < self._timeout <= 60):
raise ValueError('http_timeout must be between 0 and 60 seconds')
self._reseller_prefix = append_underscore(
conf.get('reseller_prefix', 'AUTH'))
self._delay_auth_decision = config_true_value(
conf.get('delay_auth_decision'))
# where to find the auth service (we use this to validate tokens)
self._request_uri = conf.get('auth_uri', '').rstrip('/') + '/s3tokens'
parsed = urllib.parse.urlsplit(self._request_uri)
if not parsed.scheme or not parsed.hostname:
raise ConfigFileError(
'Invalid auth_uri; must include scheme and host')
if parsed.scheme not in ('http', 'https'):
raise ConfigFileError(
'Invalid auth_uri; scheme must be http or https')
if parsed.query or parsed.fragment or '@' in parsed.netloc:
raise ConfigFileError('Invalid auth_uri; must not include '
'username, query, or fragment')
# SSL
insecure = config_true_value(conf.get('insecure'))
cert_file = conf.get('certfile')
key_file = conf.get('keyfile')
if insecure:
self._verify = False
elif cert_file and key_file:
self._verify = (cert_file, key_file)
elif cert_file:
self._verify = cert_file
else:
self._verify = None
self._secret_cache_duration = int(conf.get('secret_cache_duration', 0))
if self._secret_cache_duration > 0:
try:
auth_plugin = keystone_loading.get_plugin_loader(
conf.get('auth_type'))
available_auth_options = auth_plugin.get_options()
auth_options = {}
for option in available_auth_options:
name = option.name.replace('-', '_')
value = conf.get(name)
if value:
auth_options[name] = value
auth = auth_plugin.load_from_options(**auth_options)
session = keystone_session.Session(auth=auth)
self.keystoneclient = keystone_client.Client(session=session)
self._logger.info("Caching s3tokens for %s seconds",
self._secret_cache_duration)
except Exception:
self._logger.warning("Unable to load keystone auth_plugin. "
"Secret caching will be unavailable.",
exc_info=True)
self.keystoneclient = None
self._secret_cache_duration = 0
def _deny_request(self, code):
error_cls, message = {
'AccessDenied': (HTTPUnauthorized, 'Access denied'),
'InvalidURI': (HTTPBadRequest,
'Could not parse the specified URI'),
}[code]
resp = error_cls(content_type='text/xml')
error_msg = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
'<Error>\r\n <Code>%s</Code>\r\n '
'<Message>%s</Message>\r\n</Error>\r\n' %
(code, message))
if six.PY3:
error_msg = error_msg.encode()
resp.body = error_msg
return resp
def _json_request(self, creds_json):
headers = {'Content-Type': 'application/json'}
try:
response = requests.post(self._request_uri,
headers=headers, data=creds_json,
verify=self._verify,
timeout=self._timeout)
except requests.exceptions.RequestException as e:
self._logger.info('HTTP connection exception: %s', e)
raise self._deny_request('InvalidURI')
if response.status_code < 200 or response.status_code >= 300:
self._logger.debug('Keystone reply error: status=%s reason=%s',
response.status_code, response.reason)
raise self._deny_request('AccessDenied')
return response
def __call__(self, environ, start_response):
"""Handle incoming request. authenticate and send downstream."""
req = Request(environ)
self._logger.debug('Calling S3Token middleware.')
# Always drop auth headers if we're first in the pipeline
if 'keystone.token_info' not in req.environ:
req.headers.update({h: None for h in KEYSTONE_AUTH_HEADERS})
try:
parts = split_path(urllib.parse.unquote(req.path), 1, 4, True)
version, account, container, obj = parts
except ValueError:
msg = 'Not a path query: %s, skipping.' % req.path
self._logger.debug(msg)
return self._app(environ, start_response)
# Read request signature and access id.
s3_auth_details = req.environ.get('s3api.auth_details')
if not s3_auth_details:
msg = 'No authorization details from s3api. skipping.'
self._logger.debug(msg)
return self._app(environ, start_response)
access = s3_auth_details['access_key']
if isinstance(access, six.binary_type):
access = access.decode('utf-8')
signature = s3_auth_details['signature']
if isinstance(signature, six.binary_type):
signature = signature.decode('utf-8')
string_to_sign = s3_auth_details['string_to_sign']
if isinstance(string_to_sign, six.text_type):
string_to_sign = string_to_sign.encode('utf-8')
token = base64.urlsafe_b64encode(string_to_sign)
if isinstance(token, six.binary_type):
token = token.decode('ascii')
# NOTE(chmou): This is to handle the special case with nova
# when we have the option s3_affix_tenant. We will force it to
# connect to another account than the one
# authenticated. Before people start getting worried about
# security, I should point that we are connecting with
# username/token specified by the user but instead of
# connecting to its own account we will force it to go to an
# another account. In a normal scenario if that user don't
# have the reseller right it will just fail but since the
# reseller account can connect to every account it is allowed
# by the swift_auth middleware.
force_tenant = None
if ':' in access:
access, force_tenant = access.split(':')
# Authenticate request.
creds = {'credentials': {'access': access,
'token': token,
'signature': signature}}
memcache_client = None
memcache_token_key = 's3secret/%s' % access
if self._secret_cache_duration > 0:
memcache_client = cache_from_env(environ)
cached_auth_data = None
if memcache_client:
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
if len(cached_auth_data) == 4:
# Old versions of swift may have cached token, too,
# but we don't need it
headers, _token, tenant, secret = cached_auth_data
else:
headers, tenant, secret = cached_auth_data
if s3_auth_details['check_signature'](secret):
self._logger.debug("Cached creds valid")
else:
self._logger.debug("Cached creds invalid")
cached_auth_data = None
if not cached_auth_data:
creds_json = json.dumps(creds)
self._logger.debug('Connecting to Keystone sending this JSON: %s',
creds_json)
# NOTE(vish): We could save a call to keystone by having
# keystone return token, tenant, user, and roles
# from this call.
#
# NOTE(chmou): We still have the same problem we would need to
# change token_auth to detect if we already
# identified and not doing a second query and just
# pass it through to swiftauth in this case.
try:
# NB: requests.Response, not swob.Response
resp = self._json_request(creds_json)
except HTTPException as e_resp:
if self._delay_auth_decision:
msg = ('Received error, deferring rejection based on '
'error: %s')
self._logger.debug(msg, e_resp.status)
return self._app(environ, start_response)
else:
msg = 'Received error, rejecting request with error: %s'
self._logger.debug(msg, e_resp.status)
# NB: swob.Response, not requests.Response
return e_resp(environ, start_response)
self._logger.debug('Keystone Reply: Status: %d, Output: %s',
resp.status_code, resp.content)
try:
token = resp.json()
if 'access' in token:
headers, tenant = parse_v2_response(token)
elif 'token' in token:
headers, tenant = parse_v3_response(token)
else:
raise ValueError
if memcache_client:
user_id = headers.get('X-User-Id')
if not user_id:
raise ValueError
try:
cred_ref = self.keystoneclient.ec2.get(
user_id=user_id,
access=access)
memcache_client.set(
memcache_token_key,
(headers, tenant, cred_ref.secret),
time=self._secret_cache_duration)
self._logger.debug("Cached keystone credentials")
except Exception:
self._logger.warning("Unable to cache secret",
exc_info=True)
# Populate the environment similar to auth_token,
# so we don't have to contact Keystone again.
#
# Note that although the strings are unicode following json
# deserialization, Swift's HeaderEnvironProxy handles ensuring
# they're stored as native strings
req.environ['keystone.token_info'] = token
except (ValueError, KeyError, TypeError):
if self._delay_auth_decision:
error = ('Error on keystone reply: %d %s - '
'deferring rejection downstream')
self._logger.debug(error, resp.status_code, resp.content)
return self._app(environ, start_response)
else:
error = ('Error on keystone reply: %d %s - '
'rejecting request')
self._logger.debug(error, resp.status_code, resp.content)
return self._deny_request('InvalidURI')(
environ, start_response)
req.headers.update(headers)
tenant_to_connect = force_tenant or tenant['id']
if six.PY2 and isinstance(tenant_to_connect, six.text_type):
tenant_to_connect = tenant_to_connect.encode('utf-8')
self._logger.debug('Connecting with tenant: %s', tenant_to_connect)
new_tenant_name = '%s%s' % (self._reseller_prefix, tenant_to_connect)
environ['PATH_INFO'] = environ['PATH_INFO'].replace(account,
new_tenant_name)
return self._app(environ, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return S3Token(app, conf)
return auth_filter
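# A hedged configuration sketch showing how this filter factory is typically wired
# in via paste.deploy; the pipeline ordering is an assumption and only options
# documented above are shown.
#
#     [pipeline:main]
#     pipeline = catch_errors ... s3api s3token keystoneauth proxy-server
#
#     [filter:s3token]
#     use = egg:swift#s3token
#     auth_uri = https://keystonehost:35357/v3
#     http_timeout = 10.0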
|
the-stack_106_30386 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=arguments-differ
"""A set of cluster jobs for Aer."""
from typing import List, Optional, Union, Tuple, Iterable
import time
import logging
import copy
import datetime
import uuid
from qiskit.circuit import QuantumCircuit
from qiskit.pulse import Schedule
from qiskit.qobj import QasmQobj
from qiskit.providers import JobV1 as Job
from qiskit.providers import JobStatus, JobError
from qiskit.result import Result
from .utils import DEFAULT_EXECUTOR, requires_submit
from .aerjob import AerJob
logger = logging.getLogger(__name__)
class AerJobSet(Job):
"""A set of cluster jobs.
An instance of this class is returned when you submit experiments with
executor option.
It provides methods that allow you to interact
with the jobs as a single entity. For example, you can retrieve the results
for all of the jobs using :meth:`result()` and cancel all jobs using
:meth:`cancel()`.
"""
def __init__(self, backend, job_id, func, experiments: List[QasmQobj], executor=None):
"""AerJobSet constructor.
Args:
backend(Aerbackend): Aerbackend.
job_id(int): Job Id.
func(fun): Callabled function.
experiments(List[QasmQobj]): List[QasmQobjs] to execute.
executor(ThreadPoolExecutor or dask.distributed.client): The executor
to be used to submit the job.
"""
super().__init__(backend, job_id)
self._experiments = experiments
# Used for caching
self._future = None
self._futures = []
self._results = None
self._fn = func
self._executor = executor or DEFAULT_EXECUTOR
self._start_time = None
self._end_time = None
def submit(self):
"""Execute this set of jobs on an executor.
Raises:
RuntimeError: If the jobs were already submitted.
"""
if self._futures:
raise RuntimeError(
'The jobs for this managed job set have already been submitted.')
self._future = True
self._start_time = datetime.datetime.now()
for i, exp in enumerate(self._experiments):
job_id = str(uuid.uuid4())
logger.debug("Job %s submitted", i + 1)
aer_job = AerJob(self._backend, job_id, self._fn, exp, self._executor)
aer_job.submit()
aer_job._future.add_done_callback(self._set_end_time)
self._futures.append(aer_job)
@requires_submit
def status(self, worker: Union[None, int, Iterable[int]]
) -> Union[JobStatus, List[JobStatus]]:
"""Return the status of each job in this set.
        Args:
worker: Worker id. When None, all workers' statuses are returned.
Returns:
A list of job statuses.
"""
if isinstance(worker, int):
aer_job = self._futures[worker]
            return aer_job.status()
elif isinstance(worker, Iterable):
job_list = []
for worker_id in worker:
aer_job = self._futures[worker_id]
job_list.append(aer_job.status())
return job_list
else:
return [aer.status() for aer in self._futures]
@requires_submit
def result(self,
timeout: Optional[float] = None,
) -> Result:
"""Return the results of the jobs as a single Result object.
This call will block until all job results become available or
the timeout is reached.
Args:
timeout: Number of seconds to wait for job results.
Returns:
qiskit.Result: Result object
Raises:
JobError: if unable to retrieve all job results before the
specified timeout.
"""
res = self.worker_results(worker=None, timeout=timeout)
return self._combine_results(res)
@requires_submit
def worker_results(self,
worker: Union[None, int, Iterable[int]],
timeout: Optional[float] = None,
) -> Union[Result, List[Result]]:
"""Return the result of the jobs specified with worker_id.
When the worker is None, this call return all worker's result.
Args:
worker: Worker id to wait for job result.
timeout: Number of seconds to wait for job results.
Returns:
qiskit.Result: Result object
instance that can be used to retrieve results
for individual experiments.
Raises:
JobError: if unable to retrieve all job results before the
specified timeout.
"""
# We'd like to use futures.as_completed or futures.wait
# however this excludes the use of dask as executor
# because dask's futures are not ~exactly~ the same.
res = []
if isinstance(worker, int):
return self._get_worker_result(worker, timeout)
elif isinstance(worker, Iterable):
for worker_id in worker:
res.append(self._get_worker_result(worker_id, timeout))
return res
else:
for worker_id in range(len(self._futures)):
res.append(self._get_worker_result(worker_id, timeout))
return res
def _get_worker_result(self, worker: int, timeout: Optional[float] = None):
"""Return the result of the jobs specified with worker_id.
this call return all worker's result specified worker and
block until job result become available or the timeout is reached.
Analogous to dask.client.gather()
Args:
worker: Worker id to wait for job result.
timeout: Number of seconds to wait for job results.
Returns:
qiskit.Result: Result object
instance that can be used to retrieve a result.
Raises:
JobError: if unable to retrieve all job results before the
specified timeout.
"""
start_time = time.time()
original_timeout = timeout
aer_job = self._futures[worker]
try:
result = aer_job.result(timeout=timeout)
if result is None or not result.success:
if result:
logger.warning('ClusterJob %s Error: %s', aer_job.name(), result.header)
else:
logger.warning('ClusterJob %s did not return a result', aer_job.name())
except JobError:
raise JobError(
'Timeout while waiting for the results of experiment {}'.format(
aer_job.name()))
if timeout:
timeout = original_timeout - (time.time() - start_time)
if timeout <= 0:
raise JobError(
"Timeout while waiting for JobSet results")
return result
def _combine_results(self,
results: List[Union[Result, None]] = None
) -> Result:
"""Combine results from all jobs into a single `Result`.
Note:
Since the order of the results must match the order of the initial
experiments, job results can only be combined if all jobs succeeded.
Args:
results: Result will be combined.
Returns:
A :class:`~qiskit.result.Result` object that contains results from
all jobs.
Raises:
JobError: If results cannot be combined because some jobs failed.
"""
if not results:
raise JobError(
"Results cannot be combined - no results.")
# find first non-null result and copy it's config
_result = next((r for r in results if r is not None), None)
if _result:
combined_result = copy.deepcopy(_result)
combined_result.results = []
else:
raise JobError(
"Results cannot be combined - no results.")
for each_result in results:
if each_result is not None:
combined_result.results.extend(each_result.results)
combined_result_dict = combined_result.to_dict()
if self._end_time is None:
self._end_time = datetime.datetime.now()
if self._start_time:
_time_taken = self._end_time - self._start_time
combined_result_dict["time_taken"] = _time_taken.total_seconds()
else:
combined_result_dict["time_taken"] = 0
combined_result_dict["date"] = datetime.datetime.isoformat(self._end_time)
combined_result = Result.from_dict(combined_result_dict)
return combined_result
@requires_submit
def cancel(self) -> None:
"""Cancel all jobs in this job set."""
for aer_job in self._futures:
aer_job.cancel()
@requires_submit
def job(self, experiment: Union[str, QuantumCircuit, Schedule]) -> Tuple[AerJob, int]:
"""Retrieve the job used to submit the specified experiment and its index.
Args:
experiment: Retrieve the job used to submit this experiment. Several
types are accepted for convenience:
* str: The name of the experiment.
* QuantumCircuit: The name of the circuit instance will be used.
* Schedule: The name of the schedule instance will be used.
Returns:
A tuple of the job used to submit the experiment and the experiment index.
Raises:
JobError: If the job for the experiment could not be found.
"""
worker_index = self.worker(experiment)
return self.worker_job(worker_index)
@requires_submit
def worker(self,
experiment: Union[str, QuantumCircuit, Schedule]
) -> Union[int, List[int]]:
"""Retrieve the index of job.
Args:
experiment: Retrieve the job used to submit this experiment. Several
types are accepted for convenience:
* str: The name of the experiment.
* QuantumCircuit: The name of the circuit instance will be used.
* Schedule: The name of the schedule instance will be used.
Returns:
list or integer value of the job id
Raises:
JobError: If the job for the experiment could not be found.
"""
if isinstance(experiment, (QuantumCircuit, Schedule)):
experiment = experiment.name
job_list = []
for job in self._futures:
for i, exp in enumerate(job.qobj().experiments):
if hasattr(exp.header, 'name') and exp.header.name == experiment:
job_list.append(i)
if len(job_list) == 1:
return job_list[0]
elif len(job_list) > 1:
return job_list
raise JobError(
'Unable to find the job for experiment {}.'.format(experiment))
@requires_submit
def worker_job(self,
worker: Union[None, int, Iterable[int]]
) -> Union[AerJob, List[AerJob]]:
"""Retrieve the job specified with job's id
Args:
            worker: retrieve the job(s) submitted under this worker id.
Returns:
A list of :class:`~qiskit.providers.aer.AerJob`
instances that represents the submitted jobs.
Raises:
JobError: If the job for the experiment could not be found.
"""
aer_jobs = []
if isinstance(worker, int):
return self._futures[worker]
elif isinstance(worker, Iterable):
for worker_id in worker:
aer_jobs.append(self._futures[worker_id])
return aer_jobs
else:
return self._futures
def _set_end_time(self, future):
"""Set job's end time to calculate "time_taken" value
Args:
future(concurrent.futures or dask.distributed.futures): callback future object
"""
# pylint: disable=unused-argument
self._end_time = datetime.datetime.now()
def executor(self):
"""Return the executor for this job"""
return self._executor
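# A rough usage sketch. AerJobSet is normally created by the backend when
# experiments are submitted with an `executor` option; `run_fn` below stands in
# for the backend-internal callable that executes a single qobj, and the job id,
# qobj list and executor are illustrative placeholders only.
#
#     job_set = AerJobSet(backend, job_id="jobset-0", func=run_fn,
#                         experiments=[qobj_1, qobj_2], executor=my_executor)
#     job_set.submit()
#     combined = job_set.result(timeout=300)   # one qiskit.result.Result for all sub-jobs
#     statuses = job_set.status(None)          # list with one JobStatus per sub-job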
|
the-stack_106_30387 | """PyTorch compatible samplers.
These determine the order of iteration through a dataset.
Authors:
* Aku Rouhe 2020
* Samuele Cornell 2020
* Ralf Leibold 2020
"""
import torch
import logging
from operator import itemgetter
from torch.utils.data import (
RandomSampler,
WeightedRandomSampler,
DistributedSampler,
Sampler,
)
import numpy as np
from typing import List
from speechbrain.dataio.dataset import DynamicItemDataset
logger = logging.getLogger(__name__)
class ReproducibleRandomSampler(RandomSampler):
"""A modification of RandomSampler which always returns the same values.
Also look at `torch.utils.data.RandomSampler`. This has mostly
the same behaviour and arguments, except for adding 'seed' and 'epoch' and
not supporting 'generator'.
Note
----
Call `set_epoch` before every epoch. Otherwise, the sampler will produce the
same sequence of indices every epoch.
Arguments
---------
data_source : Dataset
The data source to sample indices for.
seed : int
The base seed to use for the random number generator. It is recommended
to use a value which has a good mix of 0 and 1 bits.
epoch : int
The epoch to start at.
Example
-------
>>> import torch
>>> from speechbrain.utils.checkpoints import Checkpointer
>>> from speechbrain.dataio.dataloader import SaveableDataLoader
>>> # An example "dataset"
>>> dataset = torch.arange(10).unsqueeze(1)
>>> # Create the random sampler:
>>> sampler = ReproducibleRandomSampler(dataset)
>>> dataloader = SaveableDataLoader(dataset, sampler = sampler,
... num_workers = 3)
>>> # Setup the checkpointer.
>>> # Note that the sampler doesn't need to be saved itself.
>>> tmpdir = getfixture('tmpdir')
>>> checkpointer = Checkpointer(tmpdir, {"dataloader": dataloader})
>>> # Iterate:
>>> subset = []
>>> for i, data_point in enumerate(dataloader):
... # Say you save a checkpoint on the fourth batch:
... if i == 3:
... _ = checkpointer.save_checkpoint(end_of_epoch = False)
... # So let's save the numbers you would get if you continue
... if i >= 4:
... subset.append(data_point.item())
>>> # What if instead you had to restart the experiment?
>>> new_sampler = ReproducibleRandomSampler(dataset)
>>> new_dataloader = SaveableDataLoader(dataset, sampler = new_sampler,
... num_workers = 3)
>>> new_checkpointer = Checkpointer(tmpdir, {"dataloader": new_dataloader})
>>> _ = new_checkpointer.recover_if_possible()
>>> # You'll get the same random order again:
>>> new_subset = [data_point.item() for data_point in new_dataloader]
>>> assert subset == new_subset
"""
def __init__(self, data_source, seed=563375142, epoch=0, **kwargs):
if "generator" in kwargs:
MSG = (
"Cannot give a separate generator when using "
+ "ReproducibleRandomSampler"
)
raise ValueError(MSG)
super().__init__(data_source, **kwargs)
self.seed = int(seed)
self.epoch = epoch
self.generator = torch.Generator()
def set_epoch(self, epoch):
"""
You can also just access self.epoch, but we maintain this interface
to mirror torch.utils.data.distributed.DistributedSampler
"""
self.epoch = epoch
def __iter__(self):
self.generator.manual_seed(self.seed + self.epoch)
return super().__iter__()
class ReproducibleWeightedRandomSampler(WeightedRandomSampler):
"""A reproducible modification of WeightedRandomSampler.
Also look at `torch.utils.data.WeightedRandomSampler`. This has the
the same behaviour and arguments, except for adding 'seed' and 'epoch' and
not supporting 'generator'.
Note
----
Call `set_epoch` before every epoch. Otherwise, the sampler will produce the
same sequence of indices every epoch.
Arguments
---------
weights : sequence of float
Weights for each index. Doesn't need to sum to one.
num_samples : int
Number of samples to draw
replacement : bool
To draw with replacement or not (within an epoch of num_samples).
seed : int
The base seed to use for the random number generator. It is recommended
to use a value which has a good mix of 0 and 1 bits.
epoch : int
The epoch to start at.
Example
-------
>>> a = ReproducibleWeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)
>>> b = ReproducibleWeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)
>>> list(a)
[3, 1, 4, 4, 4]
>>> list(b)
[3, 1, 4, 4, 4]
>>> a.set_epoch(1)
>>> list(a)
[4, 5, 4, 4, 3]
>>> b.set_epoch(1)
>>> list(b)
[4, 5, 4, 4, 3]
"""
def __init__(
self,
weights,
num_samples,
replacement,
seed=129491412,
epoch=0,
**kwargs,
):
if "generator" in kwargs:
MSG = (
"Cannot give a separate generator when using "
+ "ReproducibleRandomSampler"
)
raise ValueError(MSG)
super().__init__(weights, num_samples, replacement, **kwargs)
self.seed = int(seed)
self.epoch = epoch
self.generator = torch.Generator()
def set_epoch(self, epoch):
"""
You can also just access self.epoch, but we maintain this interface
to mirror torch.utils.data.distributed.DistributedSampler
"""
self.epoch = epoch
def __iter__(self):
self.generator.manual_seed(self.seed + self.epoch)
return super().__iter__()
class ConcatDatasetBatchSampler(Sampler):
"""This sampler is built to work with a standard Pytorch ConcatDataset.
It is used to retrieve elements from the different concatenated datasets placing them in the same batch
with proportion specified by batch_sizes, e.g 8, 16 means each batch will
be of 24 elements with the first 8 belonging to the first dataset in ConcatDataset
object and the last 16 to the second.
More than two datasets are supported, in that case you need to provide 3 batch
sizes.
Note
----
Batched are drawn from the datasets till the one with smallest length is exhausted.
Thus number of examples in your training epoch is dictated by the dataset
whose length is the smallest.
Arguments
---------
samplers : int
The base seed to use for the random number generator. It is recommended
to use a value which has a good mix of 0 and 1 bits.
batch_sizes: list
Batch sizes.
epoch : int
The epoch to start at.
Example
-------
>>> import torch
>>> from speechbrain.dataio.sampler import ConcatDatasetBatchSampler, ReproducibleRandomSampler
>>> from speechbrain.dataio.sampler import ReproducibleRandomSampler
>>> from speechbrain.dataio.dataloader import SaveableDataLoader
>>> # example "datasets"
>>> dataset1 = torch.arange(0, 10).unsqueeze(1)
>>> dataset2 = torch.arange(20, 40).unsqueeze(1)
>>> tot_dataset = torch.utils.data.ConcatDataset([dataset1, dataset2])
>>> sampler1 = ReproducibleRandomSampler(dataset1)
>>> sampler2 = ReproducibleRandomSampler(dataset2)
>>> tot_sampler = ConcatDatasetBatchSampler([sampler1, sampler2], [2, 4])
>>> dataloader = SaveableDataLoader(tot_dataset, batch_sampler = tot_sampler,
... num_workers = 3)
>>> for data_point in dataloader:
... assert len(data_point) == 6
... for i in range(2):
... assert data_point[i] in [x for x in range(0, 10)]
... for i in range(2, 4):
... assert data_point[i] in [x for x in range(10, 40)]
"""
def __init__(self, samplers, batch_sizes: (tuple, list), epoch=0) -> None:
if not isinstance(samplers, (list, tuple)):
raise ValueError(
"samplers should be a list or tuple of Pytorch Samplers, "
"but got samplers={}".format(batch_sizes)
)
if not isinstance(batch_sizes, (list, tuple)):
raise ValueError(
"batch_sizes should be a list or tuple of integers, "
"but got batch_sizes={}".format(batch_sizes)
)
if not len(batch_sizes) == len(samplers):
raise ValueError(
"batch_sizes and samplers should be have same length"
)
self.batch_sizes = batch_sizes
self.samplers = samplers
self.offsets = [0] + np.cumsum(
[len(x) for x in self.samplers]
).tolist()[:-1]
self.epoch = epoch
self.set_epoch(self.epoch)
def _iter_one_dataset(self, c_batch_size, c_sampler, c_offset):
batch = []
for idx in c_sampler:
batch.append(c_offset + idx)
if len(batch) == c_batch_size:
yield batch
def set_epoch(self, epoch):
"""You can also just access self.epoch, but we maintain this interface
to mirror ``torch.utils.data.distributed.DistributedSampler``.
"""
if hasattr(self.samplers[0], "epoch"):
for s in self.samplers:
s.set_epoch(epoch)
def __iter__(self):
iterators = [iter(i) for i in self.samplers]
tot_batch = []
for b_num in range(len(self)):
for samp_idx in range(len(self.samplers)):
c_batch = []
while len(c_batch) < self.batch_sizes[samp_idx]:
c_batch.append(
self.offsets[samp_idx] + next(iterators[samp_idx])
)
tot_batch.extend(c_batch)
yield tot_batch
tot_batch = []
def __len__(self):
min_len = float("inf")
for idx, sampler in enumerate(self.samplers):
c_len = len(sampler) // self.batch_sizes[idx]
min_len = min(c_len, min_len)
return min_len
class DynamicBatchSampler(Sampler):
"""This BatchSampler batches examples together by grouping them by their length.
Every example in the batch have approximately the same length and
thus padding is minimized.
This enables faster training on datasets
where length of examples can vary significantly (e.g Librispeech).
Inspired by: https://www.tensorflow.org/api_docs/python/tf/data/experimental/bucket_by_sequence_length
Dynamic batching is performed by specifying a max_batch_length which is the
upper limit for the sum of the length of examples in a batch:
    e.g., if ex1 has length 4, ex2 has length 5 and max_batch_length is set to 6
ex1 and ex2 will be placed, alone, in two distinct batches.
Length for each example can be obtained in two manners.
If the input dataset is a DynamicItemDataset it can be obtained by specifying a
length_func. Default assumes a "duration" entry is in the annotation.
Length for each example can also be passed to this class upon instantiation
by specifying a list containing the length for each example and passing it to
lengths_list.
Examples are grouped together by defining a set of possible discrete intervals
(buckets) multiple of a left_bucket_length.
A bucket_length_multiplier is used to specify the number of possible buckets.
E.g., if max_batch_length = 32 and left_bucket_length = 10, bucket_length_multiplier = 2
there will be 3 buckets: [0, 10), [10, 20), [20, 40).
A common choice would be setting left_bucket_length to approximately the length
of your shortest example in the dataset.
Decreasing bucket_length_multiplier creates more buckets in the whole interval
    of [left_bucket_length, max_batch_length]: e.g. if max_batch_length = 32 and left_bucket_length = 10,
    bucket_length_multiplier = 1.2 the number of buckets increases to 8.
    With right boundaries: [10 12 14 17 21 25 30].
Thus examples with length less than 10 are all grouped together but more buckets
are created for longer examples.
Note that the bucket boundary grows exponentially using the multiplier.
The buckets can also be specified by passing a list to the bucket_boundaries
argument instead of specifying a left_bucket_length and a bucket_length_multiplier.
Example
-------
>>> import torch
>>> import speechbrain as sb
>>> from speechbrain.dataio.sampler import DynamicBatchSampler
>>> from speechbrain.dataio.dataset import DynamicItemDataset
>>> from speechbrain.dataio.dataloader import SaveableDataLoader
>>> from speechbrain.dataio.batch import PaddedBatch
>>> import numpy as np
>>> item_lengths = sorted([np.random.randint(10, 100) for x in range(20)])
>>> dataset = {"ex_{}".format(x) : {"wav" :torch.randn(x)} for x in item_lengths}
>>> dataset = DynamicItemDataset(dataset)
>>> dataset.set_output_keys(["wav"])
>>> length_func = lambda x : len(x) # trivial in this example
>>> bsampler = DynamicBatchSampler(dataset, 20, 10, 1.1, length_func, shuffle=False)
>>> dataloader = SaveableDataLoader(dataset, batch_sampler=bsampler, collate_fn=PaddedBatch)
>>> for i, b in enumerate(dataloader):
... data, length = b["wav"]
>>> assert data.shape[-1] == max(item_lengths)
Arguments
---------
dataset : torch.utils.data.Dataset
Pytorch Dataset from which elements will be sampled.
max_batch_length : int
Upper limit for the sum of the length of examples in a batch.
Should be chosen based on your GPU memory.
left_bucket_length : int
Minimum length of a bucket. Specifies resolution of buckets and thus this sampler
stochasticity. A common choice is to set this to length of your
shortest example.
bucket_length_multiplier : float
Multiplier for bucket length, specifies number of buckets from left_bucket_length to
max_batch_length.
length_func : callable
Function used to get length of each example from the dataset.
This argument can be used only when the dataset is a Speechbrain DynamicItemDataset object.
Can be anything: e.g. lambda x: x["duration"]*16000 returns number of samples
if duration key in the annotation is in seconds and the file has 16kHz sampling freq.
shuffle : bool
Whether or not shuffle examples between each epoch.
bucket_boundaries : list
Overrides bucket_length_multiplier and left_bucket_length by specifying manually
the buckets right boundaries.
lengths_list: list
Overrides length_func by passing a list containing the length of each example
in the dataset. This argument must be set when the dataset is a plain
Pytorch Dataset object and not a DynamicItemDataset object as length_func
cannot be used on Pytorch Datasets.
epoch : int
The epoch to start at.
drop_last : bool
If ``True``, the sampler will drop the last examples which
have not been grouped.
"""
def __init__(
self,
dataset,
max_batch_length: int,
left_bucket_length: int,
bucket_length_multiplier: float = 1.1,
length_func=lambda x: x["duration"],
shuffle: bool = True,
bucket_boundaries: List[int] = [],
lengths_list: List[int] = None,
seed: int = 42,
epoch: int = 0,
drop_last: bool = False,
):
self._dataset = dataset
self._ex_lengths = {}
        if lengths_list is not None:
            # take length of examples from this argument and bypass length_key
            for indx in range(len(lengths_list)):
                self._ex_lengths[str(indx)] = lengths_list[indx]
        else:
            # use length func
            if not isinstance(dataset, DynamicItemDataset):
                raise NotImplementedError(
                    "Dataset should be a Speechbrain DynamicItemDataset when using length function"
                )
            # data_ids exists only on DynamicItemDataset, so look it up inside
            # this branch rather than unconditionally above, which would break
            # plain PyTorch Datasets passed together with lengths_list.
            ex_ids = self._dataset.data_ids
            for indx in range(len(self._dataset)):
                self._ex_lengths[str(indx)] = length_func(
                    self._dataset.data[ex_ids[indx]]
                )
if bucket_boundaries is not None:
if not all([x >= 1 for x in bucket_boundaries]):
raise ValueError(
"All elements in bucket boundaries should be >= 1."
)
if not len(set(bucket_boundaries)) == len(bucket_boundaries):
raise ValueError(
"Bucket_boundaries should not contain duplicates."
)
self._bucket_boundaries = np.array(
self._get_data_boundaries(
max_batch_length=max_batch_length,
bucket_boundaries=bucket_boundaries,
left_bucket_length=left_bucket_length,
bucket_length_multiplier=bucket_length_multiplier,
)
)
self._max_batch_length = max_batch_length
self._shuffle = shuffle
self._seed = seed
self._drop_last = drop_last
# Calculate bucket lengths
self._bucket_lens = [
max(1, int(max_batch_length / self._bucket_boundaries[i]))
for i in range(len(self._bucket_boundaries))
] + [1]
self._epoch = epoch
self._generate_batches()
def _get_data_boundaries(
self,
max_batch_length: int,
bucket_boundaries: List[int],
left_bucket_length: int,
bucket_length_multiplier: float,
) -> List[int]:
if not bucket_boundaries:
if left_bucket_length <= 0:
raise ValueError(
"left_bucket_length must be >0 if no bucket_boundaries set"
)
            # a multiplier of exactly 1.0 would never grow the boundary below
            # and the while loop would never terminate, so require > 1.0
            if bucket_length_multiplier <= 1.0:
                raise ValueError(
                    "bucket_length_multiplier must be > 1.0 if no bucket_boundaries set"
                )
bucket_boundaries = {left_bucket_length}
bucket_boundary = float(left_bucket_length)
while True:
bucket_boundary *= bucket_length_multiplier
if bucket_boundary >= max_batch_length:
break
bucket_boundaries.add(bucket_boundary)
return list(sorted(bucket_boundaries))
def _generate_batches(self):
logger.info("DynamicBatchSampler: Generating dynamic batches")
if self._shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self._seed + self._epoch)
sampler = torch.randperm(len(self._dataset), generator=g).tolist() # type: ignore
else:
sampler = range(len(self._dataset)) # type: ignore
self._batches = []
bucket_batches = [[] for i in self._bucket_lens]
bucket_stats = [0 for i in self._bucket_lens]
for idx in sampler:
item_len = self._ex_lengths[str(idx)]
bucket_id = np.searchsorted(self._bucket_boundaries, item_len)
bucket_batches[bucket_id].append(idx)
bucket_stats[bucket_id] += 1
if len(bucket_batches[bucket_id]) >= self._bucket_lens[bucket_id]:
self._batches.append(bucket_batches[bucket_id])
bucket_batches[bucket_id] = []
# Dump remaining batches - we might even want to shuffle those
if not self._drop_last:
for batch in bucket_batches:
if batch:
self._batches.append(batch)
if self._epoch == 0: # only log at first epoch
logger.info(
"DynamicBatchSampler: Created {} batches, {} buckets used.".format(
len(self._batches), len(self._bucket_boundaries)
)
)
boundaries = [0] + self._bucket_boundaries.tolist()
for i in range(len(self._bucket_boundaries)):
logger.info(
"DynamicBatchSampler: Bucket {} with boundary {}-{} and batch_size {} has {} examples.".format(
i,
np.around(boundaries[i], 2),
np.around(boundaries[i + 1], 2),
self._bucket_lens[i],
bucket_stats[i],
)
)
def __iter__(self):
for batch in self._batches:
yield batch
if self._shuffle: # re-generate batches only if shuffling
self._generate_batches()
def set_epoch(self, epoch):
"""
        You can also just set self._epoch directly, but we maintain this interface
        to mirror torch.utils.data.distributed.DistributedSampler.
"""
self._epoch = epoch
self._generate_batches()
def __len__(self):
return len(self._batches)
# Heavily inspired by Catalyst, which is under Apache 2.0 licence.
# https://github.com/catalyst-team/catalyst/blob/51428d7756e62b9b8ee5379f38e9fd576eeb36e5/catalyst/data/sampler.py#L522
class DistributedSamplerWrapper(DistributedSampler):
"""This wrapper allows using any sampler with Distributed Data Parallel (DDP) correctly.
Passing blindly the sampler to each DDP process will cause to have access
within each process to all the data in the dataset instead of only a subset
of it which is unique to each process. This wrapper prevents this and
allows to use only a subset of the original data for each process.
NOTE
----
This is is automatically applied to any sampler in the Brain class when DDP
training is used.
"""
def __init__(self, sampler, *args, **kwargs):
# DistributedSampler only calls len() on dataset
# so a sampler is fine to pass there, as well.
super().__init__(dataset=sampler, *args, **kwargs)
self.sampler = sampler
def __iter__(self):
# It is easiest to use a random access interface to the wrapped
# sampler's indices, so we just fetch all indices from the wrapped
# sampler
sampler_indices = list(self.sampler.__iter__())
indices_of_indices = super().__iter__()
# Itemgetter fetches the wrapped sampler indices from the positions
# pointed to by DistributedSampler
return iter(itemgetter(*indices_of_indices)(sampler_indices))
def set_epoch(self, epoch):
"""Pass set_epoch() through to DistributedSampler and the wrapper one"""
super().set_epoch(epoch)
if hasattr(self.sampler, "set_epoch"):
self.sampler.set_epoch(epoch)
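# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): combining the two
# samplers above. This is a hedged example rather than the library's reference
# usage: it assumes the DynamicItemDataset / SaveableDataLoader / PaddedBatch
# APIs shown in the DynamicBatchSampler docstring, and the commented DDP line
# additionally assumes torch.distributed has already been initialized.
# Names such as `demo_dataset` are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch
    from speechbrain.dataio.dataset import DynamicItemDataset
    from speechbrain.dataio.dataloader import SaveableDataLoader
    from speechbrain.dataio.batch import PaddedBatch
    # Build a toy dataset of 50 "waveforms" with lengths between 10 and 59.
    demo_dataset = DynamicItemDataset(
        {"ex_{}".format(i): {"wav": torch.randn(i + 10)} for i in range(50)}
    )
    demo_dataset.set_output_keys(["wav"])
    batch_sampler = DynamicBatchSampler(
        demo_dataset,
        max_batch_length=200,
        left_bucket_length=10,
        bucket_length_multiplier=1.5,
        length_func=lambda x: x["wav"].shape[0],
        shuffle=False,
    )
    # Under DDP, each process should only see its own shard of the batches:
    # ddp_sampler = DistributedSamplerWrapper(batch_sampler, num_replicas=world_size, rank=rank)
    loader = SaveableDataLoader(
        demo_dataset, batch_sampler=batch_sampler, collate_fn=PaddedBatch
    )
    for batch in loader:
        wavs, rel_lengths = batch["wav"]
        print(wavs.shape, rel_lengths.shape)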
|
the-stack_106_30388 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import template_format
from heat.engine import resource
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.engine import parser
rds_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "RDS Test",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"DatabaseServer": {
"Type": "AWS::RDS::DBInstance",
"Properties": {
"DBName" : "wordpress",
"Engine" : "MySQL",
"MasterUsername" : "admin",
"DBInstanceClass" : "db.m1.small",
"DBSecurityGroups" : [],
"AllocatedStorage" : "5",
"MasterUserPassword": "admin"
}
}
}
}
'''
class DBInstance(resource.Resource):
"""This is copied from the old DBInstance
to verify the schema of the new TemplateResource.
"""
properties_schema = {
'DBSnapshotIdentifier': {'Type': 'String',
'Implemented': False},
'AllocatedStorage': {'Type': 'String',
'Required': True},
'AvailabilityZone': {'Type': 'String',
'Implemented': False},
'BackupRetentionPeriod': {'Type': 'String',
'Implemented': False},
'DBInstanceClass': {'Type': 'String',
'Required': True},
'DBName': {'Type': 'String',
'Required': False},
'DBParameterGroupName': {'Type': 'String',
'Implemented': False},
'DBSecurityGroups': {'Type': 'List',
'Required': False, 'Default': []},
'DBSubnetGroupName': {'Type': 'String',
'Implemented': False},
'Engine': {'Type': 'String',
'AllowedValues': ['MySQL'],
'Required': True},
'EngineVersion': {'Type': 'String',
'Implemented': False},
'LicenseModel': {'Type': 'String',
'Implemented': False},
'MasterUsername': {'Type': 'String',
'Required': True},
'MasterUserPassword': {'Type': 'String',
'Required': True},
'Port': {'Type': 'String',
'Default': '3306',
'Required': False},
'PreferredBackupWindow': {'Type': 'String',
'Implemented': False},
'PreferredMaintenanceWindow': {'Type': 'String',
'Implemented': False},
'MultiAZ': {'Type': 'Boolean',
'Implemented': False},
}
# We only support a couple of the attributes right now
attributes_schema = {
"Endpoint.Address": "Connection endpoint for the database.",
"Endpoint.Port": ("The port number on which the database accepts "
"connections.")
}
class DBInstanceTest(HeatTestCase):
def setUp(self):
super(DBInstanceTest, self).setUp()
utils.setup_dummy_db()
def test_dbinstance(self):
"""test that the Template is parsable and
publishes the correct properties.
"""
templ = parser.Template(template_format.parse(rds_template))
stack = parser.Stack(utils.dummy_context(), 'test_stack',
templ)
res = stack['DatabaseServer']
self.assertEqual(None, res._validate_against_facade(DBInstance))
|
the-stack_106_30391 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2016 The Syscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test rpc http basics
#
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import *
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (SyscoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urlparse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urlparse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urlparse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because syscoind uses keep-alive by default
# Check excessive request size
conn = httplib.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, httplib.NOT_FOUND)
conn = httplib.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, httplib.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
the-stack_106_30392 | from __future__ import absolute_import, division, print_function
DISPLAY_WIDTH = 'display_width'
ARITHMETIC_JOIN = 'arithmetic_join'
ENABLE_CFTIMEINDEX = 'enable_cftimeindex'
FILE_CACHE_MAXSIZE = 'file_cache_maxsize'
CMAP_SEQUENTIAL = 'cmap_sequential'
CMAP_DIVERGENT = 'cmap_divergent'
OPTIONS = {
DISPLAY_WIDTH: 80,
ARITHMETIC_JOIN: 'inner',
ENABLE_CFTIMEINDEX: False,
FILE_CACHE_MAXSIZE: 128,
CMAP_SEQUENTIAL: 'viridis',
CMAP_DIVERGENT: 'RdBu_r',
}
_JOIN_OPTIONS = frozenset(['inner', 'outer', 'left', 'right', 'exact'])
def _positive_integer(value):
return isinstance(value, int) and value > 0
_VALIDATORS = {
DISPLAY_WIDTH: _positive_integer,
ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,
ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),
FILE_CACHE_MAXSIZE: _positive_integer,
}
def _set_file_cache_maxsize(value):
from ..backends.file_manager import FILE_CACHE
FILE_CACHE.maxsize = value
_SETTERS = {
FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,
}
class set_options(object):
"""Set options for xarray in a controlled context.
Currently supported options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``enable_cftimeindex``: flag to enable using a ``CFTimeIndex``
for time indexes with non-standard calendars or dates outside the
Timestamp-valid range. Default: ``False``.
- ``file_cache_maxsize``: maximum number of open files to hold in xarray's
      global least-recently-used cache. This should be smaller than your
system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.
Default: 128.
- ``cmap_sequential``: colormap to use for nondivergent data plots.
Default: ``viridis``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``cmap_divergent``: colormap to use for divergent data plots.
Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
    You can use ``set_options`` either as a context manager:
>>> ds = xr.Dataset({'x': np.arange(1000)})
>>> with xr.set_options(display_width=40):
... print(ds)
<xarray.Dataset>
Dimensions: (x: 1000)
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 ...
Data variables:
*empty*
Or to set global options:
>>> xr.set_options(display_width=80)
"""
def __init__(self, **kwargs):
self.old = OPTIONS.copy()
for k, v in kwargs.items():
if k not in OPTIONS:
raise ValueError(
'argument name %r is not in the set of valid options %r'
% (k, set(OPTIONS)))
if k in _VALIDATORS and not _VALIDATORS[k](v):
raise ValueError(
'option %r given an invalid value: %r' % (k, v))
self._apply_update(kwargs)
def _apply_update(self, options_dict):
for k, v in options_dict.items():
if k in _SETTERS:
_SETTERS[k](v)
OPTIONS.update(options_dict)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
OPTIONS.clear()
self._apply_update(self.old)
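# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick demonstration
# of the validator and restore behaviour implemented above. It is hedged: it
# only exercises the pure-Python pieces defined in this file and deliberately
# avoids file_cache_maxsize, whose setter imports the backends package.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # A valid option is applied inside the context and restored on exit.
    with set_options(display_width=40):
        assert OPTIONS[DISPLAY_WIDTH] == 40
    assert OPTIONS[DISPLAY_WIDTH] == 80
    # An invalid value is rejected by the validator before anything is applied.
    try:
        set_options(display_width=-1)
    except ValueError as err:
        print("rejected as expected:", err)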
|
the-stack_106_30393 | import pytest
import os
from selenium import webdriver
@pytest.fixture
def driver(request):
sauce_username = os.environ["SAUCE_USERNAME"]
sauce_access_key = os.environ["SAUCE_ACCESS_KEY"]
remote_url = "http://{}:{}@ondemand.saucelabs.com/wd/hub".format(sauce_username, sauce_access_key)
sauceOptions = {
"screenResolution": "1280x768",
"platformName": "Windows 10",
"browserVersion": "61.0",
"seleniumVersion": "3.11.0",
'name': 'Pytest Chrome W3C Sample'
}
chromeOpts = {
'platformName':"Windows 10",
'browserName': "chrome",
'browserVersion': '61.0',
'goog:chromeOptions': {'w3c': True},
'sauce:options': sauceOptions
}
browser = webdriver.Remote(remote_url, desired_capabilities=chromeOpts)
    yield browser
    # Report the test outcome to Sauce Labs before closing the session, using
    # the report attached to the test item by pytest_runtest_makereport below.
    if hasattr(request.node, "rep_call"):
        browser.execute_script(
            "sauce:job-result={}".format(
                "passed" if request.node.rep_call.passed else "failed"
            )
        )
    browser.quit()
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # pytest hooks cannot request fixtures such as `driver`, so instead attach
    # each phase's report to the test item (item.rep_setup / item.rep_call / ...)
    # where the driver fixture can read the outcome during its teardown.
    outcome = yield
    report = outcome.get_result()
    setattr(item, "rep_" + report.when, report)
def test_should_open_safari(driver):
driver.get("http://www.saucedemo.com")
actual_title = driver.title
expected_title = "Swag Labs"
assert expected_title == actual_title
|
the-stack_106_30395 | """The tests for the MQTT discovery."""
import asyncio
from unittest.mock import patch
from homeassistant.components.mqtt.discovery import async_start
from tests.common import async_fire_mqtt_message, mock_coro
@asyncio.coroutine
def test_subscribing_config_topic(hass, mqtt_mock):
"""Test setting up discovery."""
hass_config = {}
discovery_topic = 'homeassistant'
yield from async_start(hass, discovery_topic, hass_config)
assert mqtt_mock.async_subscribe.called
call_args = mqtt_mock.async_subscribe.mock_calls[0][1]
assert call_args[0] == discovery_topic + '/#'
assert call_args[1] == 0
@asyncio.coroutine
@patch('homeassistant.components.mqtt.discovery.async_load_platform')
def test_invalid_topic(mock_load_platform, hass, mqtt_mock):
"""Test sending to invalid topic."""
mock_load_platform.return_value = mock_coro()
yield from async_start(hass, 'homeassistant', {})
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/not_config',
'{}')
yield from hass.async_block_till_done()
assert not mock_load_platform.called
@asyncio.coroutine
@patch('homeassistant.components.mqtt.discovery.async_load_platform')
def test_invalid_json(mock_load_platform, hass, mqtt_mock, caplog):
"""Test sending in invalid JSON."""
mock_load_platform.return_value = mock_coro()
yield from async_start(hass, 'homeassistant', {})
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'not json')
yield from hass.async_block_till_done()
assert 'Unable to parse JSON' in caplog.text
assert not mock_load_platform.called
@asyncio.coroutine
@patch('homeassistant.components.mqtt.discovery.async_load_platform')
def test_only_valid_components(mock_load_platform, hass, mqtt_mock, caplog):
"""Test for a valid component."""
mock_load_platform.return_value = mock_coro()
yield from async_start(hass, 'homeassistant', {})
async_fire_mqtt_message(hass, 'homeassistant/climate/bla/config', '{}')
yield from hass.async_block_till_done()
assert 'Component climate is not supported' in caplog.text
assert not mock_load_platform.called
@asyncio.coroutine
def test_correct_config_discovery(hass, mqtt_mock, caplog):
"""Test sending in correct JSON."""
yield from async_start(hass, 'homeassistant', {})
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'{ "name": "Beer" }')
yield from hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert state.name == 'Beer'
@asyncio.coroutine
def test_non_duplicate_discovery(hass, mqtt_mock, caplog):
"""Test for a non duplicate component."""
yield from async_start(hass, 'homeassistant', {})
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'{ "name": "Beer" }')
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'{ "name": "Beer" }')
yield from hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
state_duplicate = hass.states.get('binary_sensor.beer1')
assert state is not None
assert state.name == 'Beer'
assert state_duplicate is None
assert 'Component has already been discovered: ' \
'binary_sensor bla' in caplog.text
|
the-stack_106_30396 | #!/usr/bin/env python
#
# Copyright 2019 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pygame
from cmath import isclose
from vqe_playground.utils.colors import WHITE, BLACK, LIGHT_GREY
from vqe_playground.utils.fonts import *
class NumberPicker(pygame.sprite.Sprite):
"""Displays a number that may be modified by clicking and dragging"""
def __init__(self, number, width, height, enabled=True):
pygame.sprite.Sprite.__init__(self)
self.number = None
self.width = width
self.height = height
self.enabled = enabled
self.image = None
self.rect = None
self.background_color = WHITE
self.font_color = BLACK
self.font = ARIAL_36
self.set_number(number)
self.draw_number_picker()
# def update(self):
# self.draw_number_picker()
#
def set_number(self, number):
self.number = number
def draw_number_picker(self):
self.image = pygame.Surface([self.width, self.height])
self.image.convert()
self.image.fill(WHITE if self.enabled else LIGHT_GREY)
self.rect = self.image.get_rect()
rectangle = pygame.Rect(0, 0, self.width, self.height)
pygame.draw.rect(self.image, BLACK, rectangle, 1)
if not isclose(self.number, 0):
text_surface = self.font.render(str(self.number), False, BLACK)
text_xpos = (self.rect.width - text_surface.get_rect().width) / 2
text_ypos = (self.rect.height - text_surface.get_rect().height) / 2
self.image.blit(text_surface, (text_xpos, text_ypos))
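# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): standalone rendering of
# a NumberPicker. It is hedged: it assumes the vqe_playground font module has
# initialized pygame's font subsystem on import, and the display must be
# created before construction because draw_number_picker() calls convert().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((200, 120))
    picker = NumberPicker(number=3, width=80, height=60)
    screen.fill(WHITE)
    screen.blit(picker.image, (60, 30))
    pygame.display.flip()
    # Keep the window open briefly so the rendered widget is visible.
    pygame.time.wait(2000)
    pygame.quit()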
|
the-stack_106_30397 | import search
from math import(cos, pi)
# # A sample map problem
# sumner_map = search.UndirectedGraph(dict(
# Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
# Cottontown=dict(Portland=18),
# Fairfield=dict(Mitchellville=21, Portland=17),
# Mitchellville=dict(Portland=7, Fairfield=21),
# ))
#
# sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
#
# sumner_puzzle.label = 'Sumner'
# sumner_puzzle.description = '''
# An abbreviated map of Sumner County, TN.
# This map is unique, to the best of my knowledge.
# '''
ashgabat_map = search.UndirectedGraph(dict(
Kommunizm=dict(Bezmein=10, Bagyr=14, Pilmile=60),
Pewrize=dict(Bagyr=10, Shirvan=100, Faruj=130),
Bagyr=dict(Bezmein=8, Kipchak=9, Pewrize=10, Kommunizm=14),
Bezmein=dict(Bagyr=8, Kipchak=5, Kommunizm=10),
Kipchak=dict(Bezmein=5, Bagyr=9),
Shirvan=dict(Pewrize=100, Bojnourd=50, Faruj=42),
Faruj=dict(Shirvan=42, Pewrize=130, Bojnourd=98),
Bojnourd=dict(Faruj=98, Shirvan=50, Pilmile=50),
Pilmile=dict(Bojnourd=50, Kommunizm=60),
))
ashgabat_puzzle = search.GraphProblem('Bojnourd', 'Kipchak', ashgabat_map)
ashgabat_puzzle.label = 'Ashgabat'
ashgabat_puzzle.description = '''
An abbreviated map of Ashgabat, Turkmenistan.
This map is unique, to the best of my knowledge.
'''
romania_map = search.UndirectedGraph(dict(
A=dict(Z=75,S=140,T=118),
Z=dict(O=71,A=75),
S=dict(O=151,R=80,F=99),
T=dict(A=118,L=111),
O=dict(Z=71,S=151),
L=dict(T=111,M=70),
M=dict(L=70,D=75),
D=dict(M=75,C=120),
R=dict(S=80,C=146,P=97),
C=dict(R=146,P=138,D=120),
F=dict(S=99,B=211),
P=dict(R=97,C=138,B=101),
B=dict(G=90,P=101,F=211),
))
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russell & Norvig, 3rd Ed., p. 68.
'''
# A trivial Problem definition
class LightSwitch(search.Problem):
def actions(self, state):
return ['up', 'down']
def result(self, state, action):
if action == 'up':
return 'on'
else:
return 'off'
def goal_test(self, state):
return state == 'on'
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
SinglesInitState = [[0,0,3], [0,1,4], [1,0,3], [1,1,5]]
class Singles(search.Problem):
def __init__(self, initial):
self.width = 2
self.height = 2
self.initial = initial
def actions(self, state):
return [[0,0, 0], [0,1,1],[1,0,2], [1,1,3]]
# def state2String(self, myState):
# answer = ''
# for x in myState:
# for y in x:
# answer += y + ','
# answer = answer[:-1]
# answer += '|'
# return answer[:-1]
# def string2State(self, myString):
# state = myString.split('|')
# count = 0
# for x in state:
# state[count] = x.split(',')
# count += count
# return state
# def searchAction(self, x, y):
# return
def result(self, state, action):
if action[0]-1 != -1 and state[action[2]-1][2] != 0:
return state
if action[0]+1 != self.width and state[action[2]+1][2] != 0:
return state
if action[1]-1 != -1 and state[action[2]-self.width][2] != 0:
return state
if action[1]+1 != self.height and state[action[2]+self.width][2] != 0:
return state
state[action[2]][2] = 0
return state
def goal_test(self, state):
return state == state
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
singles_puzzle = Singles(SinglesInitState)
singles_puzzle.label = 'Singles Puzzle'
mySearches = [
# swiss_puzzle,
ashgabat_puzzle,
romania_puzzle,
switch_puzzle,
singles_puzzle
]
mySearchMethods = [
]
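# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): solving one of the map
# puzzles above. It is hedged: it assumes the accompanying `search` module is
# AIMA-style and provides uniform_cost_search() returning a Node with
# solution() and path_cost, which may differ in this course's version.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    goal_node = search.uniform_cost_search(romania_puzzle)
    print("Path from A to B:", goal_node.solution())
    print("Total path cost:", goal_node.path_cost)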
|
the-stack_106_30399 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations
from .operations import PolicyDefinitionsOperations
from . import models
class PolicyClient(object):
"""To manage and control access to your resources, you can define customized policies and assign them at a scope.
:ivar policy_assignments: PolicyAssignmentsOperations operations
:vartype policy_assignments: azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations
:ivar policy_definitions: PolicyDefinitionsOperations operations
:vartype policy_definitions: azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = PolicyClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.policy_assignments = PolicyAssignmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.policy_definitions = PolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> PolicyClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
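# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated client): minimal usage of
# PolicyClient. It is hedged: it assumes azure-identity is installed, that the
# AZURE_SUBSCRIPTION_ID environment variable is set, and that the generated
# policy_definitions operations expose a list() method as in other API
# versions of this package.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import os
    from azure.identity import DefaultAzureCredential
    subscription_id = os.environ["AZURE_SUBSCRIPTION_ID"]
    with PolicyClient(DefaultAzureCredential(), subscription_id) as client:
        for definition in client.policy_definitions.list():
            print(definition.name)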
|
the-stack_106_30404 | import speech_recognition as sr
import pyttsx3
# Functions
def mensagem(msg):
print('=' * 30)
print(msg)
print('=' * 30)
def comando_de_Voz(msg):
engine.say(msg)
engine.runAndWait()
engine.stop()
def ajuste_ruido_ambiente(msg):
global audio
r.adjust_for_ambient_noise(fonte)
comando_de_Voz(msg)
audio = r.listen(fonte)
print('Enviando para reconhecimento...')
engine = pyttsx3.init()
r = sr.Recognizer()
mic = sr.Microphone()
audio = 0
mensagem('ASSISTENTE VIRTUAL COM PYTHON')
nome = input('\nQual o seu nome? ')
with mic as fonte:
print('Me chame pelo meu nome!')
ajuste_ruido_ambiente('Me chame pelo meu nome!')
try:
text = r.recognize_google(audio, language="pt-BR")
print(f'{nome}, você disse: {text}')
comando_de_Voz(f'{nome}, você disse: {text}')
text = text.lower()
if text == "python":
print(f'Seja bem-vinda, {nome}')
comando_de_Voz(f'Seja bem-vinda, {nome}')
while True:
mensagem('Em que posso te ajudar? ')
ajuste_ruido_ambiente('Em que posso te ajudar?')
try:
text = r.recognize_google(audio, language="pt-BR")
print(text)
comando_de_Voz(text)
text = text.lower()
if text == 'dispensado':
print('Certo, até mais.')
comando_de_Voz('Certo, até mais.')
break
except:
print('Não entendi o que você falou!')
comando_de_Voz('Não entendi o que você falou!')
else:
print('Você errou o meu nome!')
comando_de_Voz('Você errou o meu nome!')
except:
print('Não entendi o que você falou!')
comando_de_Voz('Não entendi o que você falou!') |
the-stack_106_30407 | from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats, _contains_nan
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
The return values from ``bayes_mvs(data)`` is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
where :math:`\mu` is the sample mean, :math:`m_2` is the sample
variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
    >>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
        quantiles = dist.ppf(val), for
                0.5**(1/n),                  for i = n
          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
                1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> max = stats.ppcc_max(x)
>>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
        If the `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
        y = (x**lmbda - 1) / lmbda,  for lmbda != 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel=r'$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
if a is not None or reta:
warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
"removed in version 0.18.0", FutureWarning)
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r',
            'extreme1'}, optional
        The type of distribution to test against.  The default is 'norm'.
        Note that 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the
        same distribution.
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
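    Examples
    --------
    A minimal usage sketch (illustrative only; the exact statistic depends on
    the random sample drawn):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(loc=5, scale=3, size=100)
    >>> result = stats.anderson(x, dist='norm')
    >>> statistic, critical_values, sig_levels = result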
"""
if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',
'gumbel_r', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
logcdf = distributions.norm.logcdf(w)
logsf = distributions.norm.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
logcdf = distributions.expon.logcdf(w)
logsf = distributions.expon.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
logcdf = distributions.logistic.logcdf(w)
logsf = distributions.logistic.logsf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
elif dist == 'gumbel_r':
xbar, s = distributions.gumbel_r.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_r.logcdf(w)
logsf = distributions.gumbel_r.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_l.logcdf(w)
logsf = distributions.gumbel_l.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
    [1]_ defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
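    Examples
    --------
    A small illustrative sketch (random samples with different scales; the
    exact statistic and p-value depend on the draw):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = np.random.normal(loc=0, scale=1, size=30)
    >>> y = np.random.normal(loc=0, scale=3, size=40)
    >>> statistic, pvalue = stats.ansari(x, y)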
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
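    Examples
    --------
    An illustrative sketch with three small samples (the values are arbitrary
    and serve only to show the call signature):
    >>> from scipy import stats
    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
    >>> statistic, pvalue = stats.bartlett(a, b, c)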
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
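    Examples
    --------
    A minimal sketch (random samples with unequal spread, shown only to
    illustrate the `center` keyword; exact values depend on the draw):
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> a = np.random.normal(0, 1, size=50)
    >>> b = np.random.normal(0, 2, size=50)
    >>> c = np.random.normal(0, 3, size=50)
    >>> statistic, pvalue = stats.levene(a, b, c, center='median')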
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
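    Examples
    --------
    An illustrative sketch: test whether 7 successes in 20 trials are
    consistent with a hypothesized success probability of 0.3:
    >>> from scipy import stats
    >>> pval = stats.binom_test(7, n=20, p=0.3, alternative='two-sided')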
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
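    Examples
    --------
    A minimal sketch (two random samples with different scales; the exact
    statistic depends on the draw):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = np.random.normal(0, 1, size=40)
    >>> b = np.random.normal(0, 2, size=40)
    >>> statistic, pvalue = stats.fligner(a, b)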
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
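    Examples
    --------
    An illustrative sketch on paired measurements (arbitrary simulated data;
    the sample size is kept above 20 so the normal approximation is
    reasonable):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> before = np.random.normal(10, 2, size=30)
    >>> after = before + np.random.normal(0.5, 1, size=30)
    >>> statistic, pvalue = stats.wilcoxon(before, after)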
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = asarray(x)
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table. If ``nan_policy`` is "propagate" and there
are nans in the input, the return value for ``table`` is ``None``.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
nan_policy = kwds.pop('nan_policy', 'propagate')
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
cdata = np.concatenate(data)
contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
if contains_nan and nan_policy == 'propagate':
return np.nan, np.nan, np.nan, None
if contains_nan:
grand_median = np.median(cdata[~np.isnan(cdata)])
else:
grand_median = np.median(cdata)
# When the minimum version of numpy supported by scipy is 1.9.0,
# the above if/else statement can be replaced by the single line:
# grand_median = np.nanmedian(cdata)
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
sample = sample[~np.isnan(sample)]
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
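    Examples
    --------
    A minimal sketch for angles given in degrees (note that `high` and `low`
    must match the units of the samples):
    >>> from scipy import stats
    >>> mean_angle = stats.circmean([355, 5, 15], high=360, low=0)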
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).sum(axis=axis)
C = cos(ang).sum(axis=axis)
res = arctan2(S, C)
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
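    Examples
    --------
    A minimal sketch for angles in radians (the default range):
    >>> from scipy import stats
    >>> samples = np.array([0.1, 2*np.pi - 0.1, 0.2])
    >>> variance = stats.circvar(samples)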
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
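    Examples
    --------
    A minimal sketch for samples expressed in degrees:
    >>> from scipy import stats
    >>> std_deg = stats.circstd([355, 5, 15, 345], high=360, low=0)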
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
|
the-stack_106_30408 | from moviepy.editor import VideoFileClip
from src.pipeline import *
class VideoProcessor:
def __init__(self):
self.image_pipeline = Pipeline()
self.count = 1
def process_image(self, img, plot_output=False):
# pipeline is processing BGR image
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
undistorted, mask, warped, fitted, result = self.image_pipeline.process(img)
if plot_output:
if self.count % 30 == 0:
show_four_images((undistorted, mask, warped, fitted))
self.count = 1
else:
self.count = self.count + 1
return cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
def process_video(self, video_name='project_video.mp4'):
output = 'output_images/' + video_name
clip = VideoFileClip(video_name)
new_clip = clip.fl_image(self.process_image)
new_clip.write_videofile(output, audio=False)
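# Illustrative usage sketch (not part of the original module). It assumes a
# file named 'project_video.mp4' exists in the working directory and that
# src.pipeline provides the Pipeline class and helpers imported above.
if __name__ == '__main__':
    processor = VideoProcessor()
    processor.process_video('project_video.mp4')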
|
the-stack_106_30411 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import descriptive
import itertools
import random
import risk
def ComputeRow(n, probs):
"""Multiplies out a row of a table.
Args:
n: sum of the elements in the row
prob: sequence of float probabilities
"""
row = [n * prob for prob in probs]
return row
def SimulateRow(n, probs):
"""Generates a random row of a table.
Chooses all but the last element at random, then chooses the
last element to make the sums work.
Args:
n: sum of the elements in the row
prob: sequence of float probabilities
"""
row = [Binomial(n, prob) for prob in probs]
row[-1] += n - sum(row)
return row
def Binomial(n, prob):
"""Returns a random sample from a binomial distribution.
Args:
n: int number of trials
prob: float probability
Returns:
int: number of successes
"""
t = [1 for _ in range(n) if random.random() < prob]
return sum(t)
def ComputeRows(firsts, others, funcs, probs=None, row_func=ComputeRow):
"""Computes a table suitable for use with chi-squared stats.
There are three uses of this function:
1) To compute observed values, use probs=None and row_func=ComputeRow
2) To compute expected values, provide probs from the pooled data,
and row_func=ComputeRow
3) To generate random values, provide pooled probs,
and row_func=SimulateRow
Returns:
row of rows of float values
"""
rows = []
for table in [firsts, others]:
n = len(table)
row_probs = probs or [func(table.pmf) for func in funcs]
row = row_func(n, row_probs)
rows.append(row)
return rows
def ChiSquared(expected, observed):
"""Compute the Chi-squared statistic for two tables.
Args:
expected: row of rows of values
observed: row of rows of values
Returns:
float chi-squared statistic
"""
it = zip(itertools.chain(*expected),
itertools.chain(*observed))
t = [(obs - exp)**2 / exp for exp, obs in it]
return sum(t)
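# Illustrative check of ChiSquared (hypothetical counts; not part of the original script):
#
#     >>> ChiSquared(expected=[[10.0, 20.0]], observed=[[12, 18]])
#     0.6    # (12-10)**2/10 + (18-20)**2/20, up to float rounding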
def Test(pool, firsts, others, num_trials=1000):
# collect the functions from risk.py that take Pmfs and compute
# various probabilities
funcs = [risk.ProbEarly, risk.ProbOnTime, risk.ProbLate]
# get the observed frequency in each bin
print('observed')
observed = ComputeRows(firsts, others, funcs, probs=None)
print(observed)
# compute the expected frequency in each bin
# tables = [firsts, others]
probs = [func(pool.pmf) for func in funcs]
print('expected')
expected = ComputeRows(firsts, others, funcs, probs=probs)
print(expected)
# compute the chi-squared stat
print('chi-squared')
threshold = ChiSquared(expected, observed)
print(threshold)
input()
print('simulated %d trials' % num_trials)
chi2s = []
count = 0
for _ in range(num_trials):
simulated = ComputeRows(firsts, others, funcs, probs=probs,
row_func=SimulateRow)
chi2 = ChiSquared(expected, simulated)
chi2s.append(chi2)
if chi2 >= threshold:
count += 1
print('max chi2')
print(max(chi2s))
pvalue = 1.0 * count / num_trials
print('p-value')
print(pvalue)
return pvalue
def main():
# get the data
pool, firsts, others = descriptive.MakeTables()
Test(pool, firsts, others, num_trials=1000)
if __name__ == "__main__":
main()
|
the-stack_106_30412 | import warnings
from collections import namedtuple
from dagster import check
from dagster.core.definitions.executor import ExecutorDefinition, default_executors
from dagster.loggers import default_loggers
from dagster.utils.merger import merge_dicts
from .logger import LoggerDefinition
from .resource import ResourceDefinition
from .utils import check_valid_name
DEFAULT_MODE_NAME = "default"
class ModeDefinition(
namedtuple(
"_ModeDefinition",
"name resource_defs loggers system_storage_defs executor_defs description intermediate_storage_defs",
)
):
"""Define a mode in which a pipeline can operate.
A mode provides pipelines with a set of resource implementations, loggers, system storages,
and executors.
Args:
name (Optional[str]): The name of the mode. Must be unique within the
:py:class:`PipelineDefinition` to which the mode is attached. (default: "default").
resource_defs (Optional[Dict[str, ResourceDefinition]]): A dictionary of string resource
keys to their implementations. Individual solids may require resources to be present by
these keys.
logger_defs (Optional[Dict[str, LoggerDefinition]]): A dictionary of string logger
identifiers to their implementations.
system_storage_defs (Optional[List[SystemStorageDefinition]]): The set of system storage
options available when executing in this mode. By default, this will be the 'in_memory'
and 'filesystem' system storages.
executor_defs (Optional[List[ExecutorDefinition]]): The set of executors available when
executing in this mode. By default, this will be the 'in_process' and 'multiprocess'
executors (:py:data:`~dagster.default_executors`).
description (Optional[str]): A human-readable description of the mode.
intermediate_storage_defs (Optional[List[IntermediateStorageDefinition]]): The set of intermediate storage
options available when executing in this mode. By default, this will be the 'in_memory'
and 'filesystem' system storages.
"""
def __new__(
cls,
name=None,
resource_defs=None,
logger_defs=None,
system_storage_defs=None,
executor_defs=None,
description=None,
intermediate_storage_defs=None,
):
from dagster.core.storage.system_storage import (
default_system_storage_defs,
default_intermediate_storage_defs,
)
from .system_storage import SystemStorageDefinition
from .intermediate_storage import IntermediateStorageDefinition
if system_storage_defs is not None and intermediate_storage_defs is None:
warnings.warn(
"system_storage_defs are deprecated and will be removed in 0.10.0 "
"and should be replaced with "
"intermediate_storage_defs for intermediates and resource_defs for files"
)
check.opt_dict_param(
resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition
)
if resource_defs and "asset_store" in resource_defs:
resource_defs_with_defaults = resource_defs
else:
from dagster.core.storage.asset_store import mem_asset_store
resource_defs_with_defaults = merge_dicts(
{"asset_store": mem_asset_store}, resource_defs or {}
)
return super(ModeDefinition, cls).__new__(
cls,
name=check_valid_name(name) if name else DEFAULT_MODE_NAME,
resource_defs=resource_defs_with_defaults,
loggers=(
check.opt_dict_param(
logger_defs, "logger_defs", key_type=str, value_type=LoggerDefinition
)
or default_loggers()
),
system_storage_defs=check.list_param(
system_storage_defs if system_storage_defs else default_system_storage_defs,
"system_storage_defs",
of_type=SystemStorageDefinition,
),
intermediate_storage_defs=check.list_param(
intermediate_storage_defs
if intermediate_storage_defs
else default_intermediate_storage_defs,
"intermediate_storage_defs",
of_type=IntermediateStorageDefinition,
),
executor_defs=check.list_param(
executor_defs if executor_defs else default_executors,
"executor_defs",
of_type=ExecutorDefinition,
),
description=check.opt_str_param(description, "description"),
)
@property
def resource_key_set(self):
return frozenset(self.resource_defs.keys())
def get_system_storage_def(self, name):
check.str_param(name, "name")
for system_storage_def in self.system_storage_defs:
if system_storage_def.name == name:
return system_storage_def
check.failed("{} storage definition not found".format(name))
def get_intermediate_storage_def(self, name):
check.str_param(name, "name")
for intermediate_storage_def in self.intermediate_storage_defs:
if intermediate_storage_def.name == name:
return intermediate_storage_def
check.failed("{} storage definition not found".format(name))
@staticmethod
def from_resources(resources, name=None):
check.dict_param(resources, "resources", key_type=str)
return ModeDefinition(
name=name,
resource_defs={
resource_name: ResourceDefinition.hardcoded_resource(resource)
for resource_name, resource in resources.items()
},
)
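# Illustrative usage (hypothetical resource key and value; not part of the original module):
#
#     local_mode = ModeDefinition(
#         name="local",
#         resource_defs={"db": ResourceDefinition.hardcoded_resource("sqlite://")},
#     )
#
#     # or, equivalently, from plain Python objects:
#     local_mode = ModeDefinition.from_resources({"db": "sqlite://"}, name="local")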
|
the-stack_106_30413 | """
pghoard
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
from .base import CONSTANT_TEST_RSA_PUBLIC_KEY, CONSTANT_TEST_RSA_PRIVATE_KEY
from pghoard.rohmu import IO_BLOCK_SIZE
from pghoard.rohmu.encryptor import Decryptor, DecryptorFile, Encryptor, EncryptorFile
import io
import json
import os
import pytest
import tarfile
def test_encryptor_decryptor():
plaintext = b"test"
encryptor = Encryptor(CONSTANT_TEST_RSA_PUBLIC_KEY)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
assert plaintext not in ciphertext
decryptor = Decryptor(CONSTANT_TEST_RSA_PRIVATE_KEY)
result = decryptor.update(ciphertext) + decryptor.finalize()
assert plaintext == result
public_key = json.loads(json.dumps(CONSTANT_TEST_RSA_PUBLIC_KEY))
private_key = json.loads(json.dumps(CONSTANT_TEST_RSA_PRIVATE_KEY))
encryptor = Encryptor(public_key)
decryptor = Decryptor(private_key)
assert plaintext == decryptor.update(encryptor.update(plaintext) + encryptor.finalize()) + decryptor.finalize()
def test_decryptorfile(tmpdir):
# create a plaintext blob bigger than IO_BLOCK_SIZE
plaintext1 = b"rvdmfki6iudmx8bb25tx1sozex3f4u0nm7uba4eibscgda0ckledcydz089qw1p1"
repeat = int(1.5 * IO_BLOCK_SIZE / len(plaintext1))
plaintext = repeat * plaintext1
encryptor = Encryptor(CONSTANT_TEST_RSA_PUBLIC_KEY)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
plain_fp = open(tmpdir.join("plain").strpath, mode="w+b")
plain_fp.write(ciphertext)
plain_fp.seek(0)
fp = DecryptorFile(plain_fp, CONSTANT_TEST_RSA_PRIVATE_KEY) # pylint: disable=redefined-variable-type
assert fp.fileno() == plain_fp.fileno()
assert fp.readable() is True
assert fp.writable() is False
fp.flush()
result = fp.read()
assert plaintext == result
assert fp.seekable() is True
with pytest.raises(ValueError):
fp.seek(-1)
fp.seek(0, os.SEEK_SET)
with pytest.raises(io.UnsupportedOperation):
fp.seek(1, os.SEEK_CUR)
with pytest.raises(io.UnsupportedOperation):
fp.seek(1, os.SEEK_END)
with pytest.raises(ValueError):
fp.seek(1, 0xff)
assert fp.seek(0, os.SEEK_END) == len(plaintext)
assert fp.seek(0, os.SEEK_CUR) == len(plaintext)
fp.seek(0)
result = fp.read()
assert plaintext == result
assert fp.read(1234) == b""
assert fp.read() == b""
fp.seek(0)
result = fp.read(8192)
assert result == plaintext[:8192]
result = fp.read(8192)
assert result == plaintext[8192:8192 * 2]
result = fp.read(IO_BLOCK_SIZE * 2)
assert plaintext[8192 * 2:] == result
assert fp.seek(IO_BLOCK_SIZE // 2) == IO_BLOCK_SIZE // 2
result = fp.read()
assert len(result) == len(plaintext) - IO_BLOCK_SIZE // 2
assert plaintext[IO_BLOCK_SIZE // 2:] == result
fp.seek(2)
result = fp.read(1)
assert plaintext[2:3] == result
assert fp.tell() == 3
with pytest.raises(io.UnsupportedOperation):
fp.truncate()
# close the file (this can be safely called multiple times), other ops should fail after that
fp.close()
fp.close()
with pytest.raises(ValueError):
fp.truncate()
def test_decryptorfile_for_tarfile(tmpdir):
testdata = b"file contents"
data_tmp_name = tmpdir.join("plain.data").strpath
with open(data_tmp_name, mode="wb") as data_tmp:
data_tmp.write(testdata)
tar_data = io.BytesIO()
with tarfile.open(name="foo", fileobj=tar_data, mode="w") as tar:
tar.add(data_tmp_name, arcname="archived_content")
plaintext = tar_data.getvalue()
encryptor = Encryptor(CONSTANT_TEST_RSA_PUBLIC_KEY)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
enc_tar_name = tmpdir.join("enc.tar.data").strpath
with open(enc_tar_name, "w+b") as enc_tar:
enc_tar.write(ciphertext)
enc_tar.seek(0)
dfile = DecryptorFile(enc_tar, CONSTANT_TEST_RSA_PRIVATE_KEY)
with tarfile.open(fileobj=dfile, mode="r") as tar:
info = tar.getmember("archived_content")
assert info.isfile() is True
assert info.size == len(testdata)
content_file = tar.extractfile("archived_content")
content = content_file.read() # pylint: disable=no-member
content_file.close() # pylint: disable=no-member
assert testdata == content
decout = tmpdir.join("dec_out_dir").strpath
os.makedirs(decout)
tar.extract("archived_content", decout)
extracted_path = os.path.join(decout, "archived_content")
with open(extracted_path, "rb") as ext_fp:
assert testdata == ext_fp.read()
def test_encryptorfile(tmpdir):
# create a plaintext blob bigger than IO_BLOCK_SIZE
plaintext1 = b"rvdmfki6iudmx8bb25tx1sozex3f4u0nm7uba4eibscgda0ckledcydz089qw1p1"
repeat = int(1.5 * IO_BLOCK_SIZE / len(plaintext1))
plaintext = repeat * plaintext1
fn = tmpdir.join("data").strpath
with open(fn, "w+b") as plain_fp:
enc_fp = EncryptorFile(plain_fp, CONSTANT_TEST_RSA_PUBLIC_KEY)
assert enc_fp.fileno() == plain_fp.fileno()
assert enc_fp.readable() is False
with pytest.raises(io.UnsupportedOperation):
enc_fp.read(1)
assert enc_fp.seekable() is False
with pytest.raises(io.UnsupportedOperation):
enc_fp.seek(1, os.SEEK_CUR)
assert enc_fp.writable() is True
enc_fp.write(plaintext)
enc_fp.write(b"")
assert enc_fp.tell() == len(plaintext)
assert enc_fp.next_fp.tell() > len(plaintext)
enc_fp.close()
enc_fp.close()
plain_fp.seek(0)
dec_fp = DecryptorFile(plain_fp, CONSTANT_TEST_RSA_PRIVATE_KEY)
assert dec_fp.fileno() == plain_fp.fileno()
assert dec_fp.readable() is True
assert dec_fp.seekable() is True
assert dec_fp.writable() is False
with pytest.raises(io.UnsupportedOperation):
dec_fp.write(b"x")
dec_fp.flush()
result = dec_fp.read()
assert plaintext == result
def test_encryptorfile_for_tarfile(tmpdir):
testdata = b"file contents"
data_tmp_name = tmpdir.join("plain.data").strpath
with open(data_tmp_name, mode="wb") as data_tmp:
data_tmp.write(testdata)
enc_tar_name = tmpdir.join("enc.tar.data").strpath
with open(enc_tar_name, "w+b") as plain_fp:
enc_fp = EncryptorFile(plain_fp, CONSTANT_TEST_RSA_PUBLIC_KEY)
with tarfile.open(name="foo", fileobj=enc_fp, mode="w") as tar:
tar.add(data_tmp_name, arcname="archived_content")
enc_fp.close()
plain_fp.seek(0)
dfile = DecryptorFile(plain_fp, CONSTANT_TEST_RSA_PRIVATE_KEY)
with tarfile.open(fileobj=dfile, mode="r") as tar:
info = tar.getmember("archived_content")
assert info.isfile() is True
assert info.size == len(testdata)
content_file = tar.extractfile("archived_content")
content = content_file.read() # pylint: disable=no-member
content_file.close() # pylint: disable=no-member
assert testdata == content
|
the-stack_106_30415 | #!/usr/bin/env python3
import sys
import chpl_comm, chpl_comm_debug, chpl_launcher, chpl_platform, overrides, third_party_utils
from utils import error, memoize, try_run_command, warning
@memoize
def get():
comm_val = chpl_comm.get()
if comm_val == 'ofi':
libfabric_val = overrides.get('CHPL_LIBFABRIC')
platform_val = chpl_platform.get('target')
if not libfabric_val:
cmd_exists, returncode = try_run_command(['pkg-config',
'--exists',
'libfabric'])[0:2]
if cmd_exists and returncode == 0:
libfabric_val = 'system'
else:
libfabric_val = 'bundled'
if libfabric_val == 'none':
error("CHPL_LIBFABRIC must not be 'none' when CHPL_COMM is ofi")
if platform_val == 'hpe-cray-ex' and libfabric_val != 'system':
warning('CHPL_LIBFABRIC!=system is discouraged on HPE Cray EX')
else:
libfabric_val = 'none'
return libfabric_val
@memoize
def get_uniq_cfg_path():
base_uniq_cfg = third_party_utils.default_uniq_cfg_path()
if chpl_comm_debug.get() == 'debug':
suffix = '-debug'
else:
suffix = ''
return base_uniq_cfg + suffix
@memoize
def get_compile_args(libfabric=get()):
flags = []
if libfabric == 'bundled':
flags = third_party_utils.default_get_compile_args('libfabric',
ucp=get_uniq_cfg_path())
elif libfabric == 'system':
# Allow overriding pkg-config via LIBFABRIC_DIR, for platforms
# without pkg-config.
libfab_dir_val = overrides.get('LIBFABRIC_DIR')
if libfab_dir_val:
flags.append('-I' + libfab_dir_val + '/include')
else:
# Try using pkg-config to get the compile-time flags.
pcflags = third_party_utils.pkgconfig_get_compile_args('libfabric',
system=True)
for pcl in pcflags:
flags.append(pcl)
launcher_val = chpl_launcher.get()
ofi_oob_val = overrides.get_environ('CHPL_RT_COMM_OFI_OOB')
if 'mpi' in launcher_val or ( ofi_oob_val and 'mpi' in ofi_oob_val ):
mpi_dir_val = overrides.get_environ('MPI_DIR')
if mpi_dir_val:
flags.append('-I' + mpi_dir_val + '/include')
return flags
@memoize
def get_link_args(libfabric=get()):
libs = []
if libfabric == 'bundled':
return third_party_utils.default_get_link_args('libfabric',
ucp=get_uniq_cfg_path(),
libs=['libfabric.la'],
add_L_opt=True)
elif libfabric == 'system':
# Allow overriding pkg-config via LIBFABRIC_DIR, for platforms
# without pkg-config.
libfab_dir_val = overrides.get('LIBFABRIC_DIR')
if libfab_dir_val:
libs.extend(['-L' + libfab_dir_val + '/lib',
'-Wl,-rpath,' + libfab_dir_val + '/lib',
'-lfabric'])
else:
# Try using pkg-config to get the libraries to link
# libfabric with.
pclibs = third_party_utils.pkgconfig_get_link_args('libfabric',
system=True)
for pcl in pclibs:
libs.append(pcl)
if pcl.startswith('-L'):
libs.append(pcl.replace('-L', '-Wl,-rpath,', 1))
return libs
def _main():
libfabric_val = get()
sys.stdout.write("{0}\n".format(libfabric_val))
if __name__ == '__main__':
_main()
|
the-stack_106_30416 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import customer_customizer_service
from .base import CustomerCustomizerServiceTransport, DEFAULT_CLIENT_INFO
class CustomerCustomizerServiceGrpcTransport(
CustomerCustomizerServiceTransport
):
"""gRPC backend transport for CustomerCustomizerService.
Service to manage customer customizer
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_customer_customizers(
self,
) -> Callable[
[customer_customizer_service.MutateCustomerCustomizersRequest],
customer_customizer_service.MutateCustomerCustomizersResponse,
]:
r"""Return a callable for the mutate customer customizers method over gRPC.
Creates, updates or removes customer customizers.
Operation statuses are returned.
Returns:
Callable[[~.MutateCustomerCustomizersRequest],
~.MutateCustomerCustomizersResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_customer_customizers" not in self._stubs:
self._stubs[
"mutate_customer_customizers"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.CustomerCustomizerService/MutateCustomerCustomizers",
request_serializer=customer_customizer_service.MutateCustomerCustomizersRequest.serialize,
response_deserializer=customer_customizer_service.MutateCustomerCustomizersResponse.deserialize,
)
return self._stubs["mutate_customer_customizers"]
def close(self):
self.grpc_channel.close()
__all__ = ("CustomerCustomizerServiceGrpcTransport",)
|
the-stack_106_30420 | import matplotlib.pyplot as plt
import os
class Plot_Maker:
@staticmethod
def Create_Plot(plot_file, data_set, plot_label):
x = [data_point[0] for data_point in data_set]
y = [data_point[1] for data_point in data_set]
fig, ax = plt.subplots()
plt.plot(x, y, '-or', label=r'$E_w$')
ax.set_title(f'Wobbling energies for {plot_label}')
plt.xlabel(r'$I\ [\hbar]$')
plt.ylabel(r'$E\ [MeV]$')
ax.legend(loc='best')
plt.text(0.80, 0.20, f'{plot_label}', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes, fontsize=11)
fig.tight_layout()
plt.savefig(plot_file, bbox_inches='tight', dpi=1200)
plt.close()
@staticmethod
def Create_Band_Plots(plot_file, data_set, plot_label):
fig, ax = plt.subplots()
band_counter = 1
for data in data_set:
x = [data_point[0] for data_point in data]
y = [data_point[1] for data_point in data]
plt.plot(x, y, '-or', label=f'Band-{band_counter}')
band_counter += 1
plt.xlabel(r'$I\ [\hbar]$')
plt.ylabel(r'$E\ [MeV]$')
ax.legend(loc='best')
ax.set_title(f'Wobbling energies for {plot_label}')
plt.text(0.80, 0.20, f'{plot_label}', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes, fontsize=11)
        fig.tight_layout()
        plt.savefig(plot_file, bbox_inches='tight', dpi=400)
plt.close()
@staticmethod
def Create_Fit_Plot(expdata, thdata, plot_file, plot_label):
# the experimental results -> spins
x_data_exp = expdata[0]
# the experimental results -> energies
y_data_exp = expdata[1]
# the theoretical results -> spins
x_data_th = thdata[0]
# the theoretical results -> energies
y_data_th = thdata[1]
fig, ax = plt.subplots()
# plot the experimental curve
plt.plot(x_data_exp, y_data_exp, 'ok', label='Exp')
# plot the theoretical curve
plt.plot(x_data_th, y_data_th, '-r', label='Th')
plt.xlabel(r'$I\ [\hbar]$')
plt.ylabel(r'$E\ [MeV]$')
ax.legend(loc='best')
ax.set_title(f'Wobbling energies for {plot_label}')
plt.text(0.80, 0.20, f'{plot_label}', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes, fontsize=11)
        fig.tight_layout()
        plt.savefig(plot_file, bbox_inches='tight', dpi=400)
plt.close()
@staticmethod
def Plot_Bands(band1, band2, plot_file, plot_label):
# create the data sets for the first band
xdata_b1 = band1[0]
ydata_b1_exp = band1[1]
ydata_b1_th = band1[2]
# create the data sets for the second band
xdata_b2 = band2[0]
ydata_b2_exp = band2[1]
ydata_b2_th = band2[2]
fig, ax = plt.subplots()
# plot the experimental curve for the first band
plt.plot(xdata_b1, ydata_b1_exp, 'ok', label='Yrast-Exp')
# plot the theoretical curve for the first band
plt.plot(xdata_b1, ydata_b1_th, '-r', label='Yrast-Th')
# plot the experimental curve for the second band
plt.plot(xdata_b2, ydata_b2_exp, '+k', label='TW1-Exp')
# plot the theoretical curve for the second band
plt.plot(xdata_b2, ydata_b2_th, '-b', label='TW1-Th')
plt.xlabel(r'$I\ [\hbar]$')
plt.ylabel(r'$E\ [MeV]$')
ax.legend(loc='best')
ax.set_title(f'Wobbling energies: {plot_label}')
plt.text(0.80, 0.20, f'{plot_label}', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes, fontsize=11)
        fig.tight_layout()
        plt.savefig(plot_file, bbox_inches='tight', dpi=400)
plt.close()
@staticmethod
def Clean_Plots(plot_file):
try:
os.remove(plot_file)
except OSError:
pass
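# Illustrative usage (hypothetical data points; not part of the original module):
#
#     data = [(4, 0.31), (6, 0.52), (8, 0.74)]          # (spin I, energy E) pairs
#     Plot_Maker.Create_Plot('wobbling.png', data, '163Lu')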
|
the-stack_106_30421 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by yetongxue<[email protected]>
# 2018/5/17
import os
import tensorflow as tf
from PIL import Image
from nets import nets_factory
import numpy as np
import matplotlib.pyplot as plt
CHAR_SET_LEN = 10
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
BATCH_SIZE = 1
TFRECORD_FILE = '/Users/yexianyong/Desktop/machine_learn/project/logs/verify-code-identify/tfrecord/test.tfrecords'
MODEL_PATH = '/Users/yexianyong/Desktop/machine_learn/project/logs/verify-code-identify/model/chptcha.model-6000'
x = tf.placeholder(tf.float32, [None, 224, 224])
def read_and_decode(filename):
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
# 返回文件名与文件
_, serizlized_example = reader.read(filename_queue)
features = tf.parse_single_example(serizlized_example,
features={
'image' : tf.FixedLenFeature([], tf.string),
'label0': tf.FixedLenFeature([], tf.int64),
'label1': tf.FixedLenFeature([], tf.int64),
'label2': tf.FixedLenFeature([], tf.int64),
'label3': tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features['image'], tf.uint8)
image_raw = tf.reshape(image, [224, 224])
image = tf.reshape(image, [224, 224])
image = tf.cast(image, tf.float32) / 255.0
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
    # extract the four digit labels
label0 = tf.cast(features['label0'], tf.int32)
label1 = tf.cast(features['label1'], tf.int32)
label2 = tf.cast(features['label2'], tf.int32)
label3 = tf.cast(features['label3'], tf.int32)
return image, image_raw, label0, label1, label2, label3
image, image_raw, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)
# use shuffle_batch to assemble randomized batches
image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
[image, image_raw, label0, label1, label2, label3],
batch_size=BATCH_SIZE,
capacity=50000,
min_after_dequeue=10000,
num_threads=1
)
# define the network structure
train_network_fn = nets_factory.get_network_fn(
'alexnet_v2',
num_classes=CHAR_SET_LEN,
weight_decay=0.0005,
is_training=False
)
with tf.Session() as sess:
X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
logits0, logits1, logits2, logits3, endpoints = train_network_fn(X)
predict0 = tf.reshape(logits0, [-1, CHAR_SET_LEN])
predict0 = tf.argmax(predict0, 1)
predict1 = tf.reshape(logits1, [-1, CHAR_SET_LEN])
predict1 = tf.argmax(predict1, 1)
predict2 = tf.reshape(logits2, [-1, CHAR_SET_LEN])
predict2 = tf.argmax(predict2, 1)
predict3 = tf.reshape(logits3, [-1, CHAR_SET_LEN])
predict3 = tf.argmax(predict3, 1)
    # initialize variables
sess.run(tf.global_variables_initializer())
    # load the trained model
saver = tf.train.Saver()
saver.restore(sess, MODEL_PATH)
    # create a coordinator to manage the queue-runner threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(10):
        # fetch one batch of images and labels
b_image, b_image_raw, b_label0, b_label1, b_label2, b_label3 = sess.run([image_batch,
image_raw_batch,
label_batch0,
label_batch1,
label_batch2,
label_batch3
])
print('label:', b_label0, b_label1, b_label2, b_label3)
label0, label1, label2, label3 = sess.run([predict0, predict1, predict2, predict3], feed_dict={x: b_image})
print('predict:', label0, label1, label2, label3)
plt.imshow(b_image_raw[0])
plt.show(block=True)
# plt.interactive(False)
    # stop the input threads
coord.request_stop()
coord.join(threads)
|
the-stack_106_30422 | import numpy as np
from advent.dataset.base_dataset import BaseDataset
class SYNTHIADataSet(BaseDataset):
def __init__(self, root, list_path, set='all',
max_iters=None, crop_size=(321, 321), mean=(128, 128, 128)):
super().__init__(root, list_path, set, max_iters, crop_size, None, mean)
# map to cityscape's ids
# self.id_to_trainid = {3: 0, 4: 1, 2: 2, 21: 3, 5: 4, 7: 5,
# 15: 6, 9: 7, 6: 8, 16: 9, 1: 10, 10: 11, 17: 12,
# 8: 13, 18: 14, 19: 15, 20: 16, 12: 17, 11: 18}
self.id_to_trainid = {
3: 0,
4: 1,
2: 2,
21: 3,
5: 4,
7: 5,
15: 6,
9: 7,
6: 8,
1: 9,
10: 10,
17: 11,
8: 12,
19: 13,
12: 14,
11: 15,
}
def get_metadata(self, name):
img_file = self.root / 'RGB' / name
# label_file = self.root / 'GT/LABELS' / name
label_file = self.root / 'parsed_LABELS' / name
return img_file, label_file
def __getitem__(self, index):
img_file, label_file, name = self.files[index]
image = self.get_image(img_file)
label = self.get_labels(label_file)
# re-assign labels to match the format of Cityscapes
label_copy = 255 * np.ones(label.shape, dtype=np.float32)
for k, v in self.id_to_trainid.items():
label_copy[label == k] = v
image = self.preprocess(image)
return image.copy(), label_copy.copy(), np.array(image.shape), name
|
the-stack_106_30423 | import random
from lib.dataset.transformations.transformation_util import extract_keypoints
class MultiKeypointTransformation(object):
def __init__(self, transforms, num_keypoints, probability, dataset):
self.transforms = transforms
self.num_keypoints = num_keypoints
self.probability = probability
self.dataset = dataset
def __call__(self, sample):
keypoints = list(extract_keypoints(sample['target'], self.dataset)[0].keys())
num_iter = min(len(keypoints), self.num_keypoints)
for i in range(num_iter):
chance = random.random()
if chance < self.probability:
keypoint_name = random.choice(keypoints)
keypoints.remove(keypoint_name)
chosen_transform = random.choice(self.transforms)
chosen_transform.part = keypoint_name
sample = chosen_transform(sample)
return sample
|
the-stack_106_30426 | from core.actual_window import ActualWindow
from core.macro_runner import MacroRunner
from global_modules.macro_manager import MacroManager
from global_modules.temp_manager import purge_temp
from core.tray import Tray
if __name__ == "__main__":
purge_temp(True)
macro_manager = MacroManager()
macro_manager.load_macros()
tray_thread = Tray(macro_manager)
actual_window = ActualWindow(tray_thread)
actual_window.setDaemon(True)
macro_runner = MacroRunner(macro_manager, actual_window, tray_thread)
macro_runner.setDaemon(True)
actual_window.start()
macro_runner.start()
tray_thread.run_tray() # Execute in main thread /!\
|
the-stack_106_30428 | import torch
import torch.nn as nn
import math
# 1D variant of the VGG model; takes fixed-length time-series inputs
class VGG(nn.Module):
def __init__(self, features, args, arch='vgg', cfg_seq=None):
super(VGG, self).__init__()
self.arch = arch
self.features = features
if cfg_seq is None:
seq_len = 512 * 6
else:
seq_len = cfg_seq
self.classifier = nn.Sequential(
nn.Linear(seq_len, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, args.num_classes),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv1d):
n = m.kernel_size[0] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 1
for v in cfg:
if v == 'M':
layers += [nn.MaxPool1d(kernel_size=2, stride=2)]
else:
conv1d = nn.Conv1d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv1d, nn.BatchNorm1d(v), nn.ReLU(inplace=True)]
else:
layers += [conv1d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512,
'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M',
512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512,
512, 'M', 512, 512, 512, 512, 'M'],
'F': [64, 'M', 128, 'M', 256, 'M', 512, 'M'],
'F_seq': 16384,
'G': [64, 'M', 128, 'M', 512, 'M'],
'G_seq': 32768,
'H': [64, 'M', 512, 'M'],
'H_seq': 65536,
'I': [512, 'M'],
'I_seq': 131072,
}
# VGG with 11 weight layers
def vgg11bn(**kwargs):
model = VGG(make_layers(cfg['A'], batch_norm=True), arch='vgg11bn',
**kwargs)
return model
def vgg13bn(**kwargs):
model = VGG(make_layers(cfg['B'], batch_norm=True), arch='vgg13bn',
**kwargs)
return model
def vgg16bn(**kwargs):
model = VGG(make_layers(cfg['D'], batch_norm=True), arch='vgg16bn',
**kwargs)
return model
def vgg19bn(**kwargs):
model = VGG(make_layers(cfg['E'], batch_norm=True), arch='vgg19bn',
**kwargs)
return model
def vgg7bn(args, **kwargs):
model = VGG(make_layers(cfg['F'], batch_norm=True), arch='vgg7bn',
args=args, cfg_seq=cfg['F_seq'], **kwargs)
return model
def vgg6bn(args, **kwargs):
model = VGG(make_layers(cfg['G'], batch_norm=True), arch='vgg6bn',
args=args, cfg_seq=cfg['G_seq'], **kwargs)
return model
def vgg5bn(args, **kwargs):
model = VGG(make_layers(cfg['H'], batch_norm=True), args=args,
arch='vgg5bn', cfg_seq=cfg['H_seq'], **kwargs)
return model
def vgg4bn(args, **kwargs):
model = VGG(make_layers(cfg['I'], batch_norm=True), args=args,
arch='vgg4bn', cfg_seq=cfg['I_seq'], **kwargs)
return model
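# Illustrative usage (hypothetical argument namespace; not part of the original module).
# For cfg 'F' the four pooling layers reduce the length by 16, so an input of
# length 512 flattens to 512 channels * 32 = 16384 features, matching cfg['F_seq']:
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(num_classes=10)
#     model = vgg7bn(args)
#     out = model(torch.randn(2, 1, 512))   # -> shape (2, 10)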
|
the-stack_106_30429 | """
A management command which populates the database with test data:
years, schools, groups, courses, tasks, issues and the student and
teacher accounts that tie them together. The raw fixtures are defined
inline in ``Command.handle()``.
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from years.models import Year
from schools.models import School
from groups.models import Group
from courses.models import Course
from tasks.models import Task, TaskGroupRelations
from issues.models import Issue
def parse_name(name):
last_name, first_name = name.split(' ', 1)
username = "_".join([first_name.lower(), last_name.lower()])
return last_name, first_name, username
def save_all(collection):
for item in collection:
item.save()
class Command(BaseCommand):
help = "Creating test database."
def handle(self, **options):
# Raw data
years_raw = [2019, 2020]
courses_raw = [{"name": "Charms", "year": 0,
"groups": (0, 2)},
{"name": "Potions", "year": 1,
"groups": (1,)},
{"name": "Transfigurations", "year": 1,
"groups": (3,)}]
schools_raw = [{"name": "High School of Hersheba",
"link": "hersheba",
"courses": (0, 1)},
{"name": "Fourecks University",
"link": "fourecks",
"courses": (0, 2)}]
groups_raw = [{"name": "Hersheba2019", "year": 0},
{"name": "Hersheba2020", "year": 1},
{"name": "Fourecks2019", "year": 0},
{"name": "Fourecks2020", "year": 1}]
students_raw = [{"name": "Sia Hyde", "group": 0},
{"name": "Wasim Klein", "group": 0},
{"name": "Ella Eastwood", "group": 0},
{"name": "Maha Wilkes", "group": 1},
{"name": "Meg Sutherland", "group": 1},
{"name": "Kya Parsons", "group": 1},
{"name": "Ferne Huff", "group": 2},
{"name": "Jethro Higgs", "group": 2},
{"name": "Prince Knox", "group": 2},
{"name": "Layla Schmitt", "group": 3},
{"name": "Darci Stark", "group": 3},
{"name": "Ezmae Bradford", "group": 3}]
teachers_raw = [{"name": "Eira Buckner", "courses": (0,)},
{"name": "Paul Akhtar", "courses": (1,)},
{"name": "Kristi Todd", "courses": (2,)}]
tasks_raw = [{"title": "Charms | Task 1", "course": 0,
"groups": (0,), "updated_by": 0}]
issues_raw = [{"student": 0, "task": 0}]
# Create object from raw data
print("Create years {}".format(years_raw))
years = [Year.objects.create(start_year=start_year)
for start_year in years_raw]
save_all(years)
print("Create courses {}".format(courses_raw))
courses = [Course.objects.create(name=course["name"],
year=years[course["year"]],
is_active=True)
for course in courses_raw]
save_all(courses)
print("Create schools {}".format(schools_raw))
schools = [School.objects.create(name=school["name"],
link=school["link"])
for school in schools_raw]
save_all(schools)
print("Create groups {}".format(groups_raw))
groups = [Group.objects.create(name=group["name"],
year=years[group["year"]])
for group in groups_raw]
save_all(groups)
print("Create users")
students = []
teachers = []
for user_raw in students_raw + teachers_raw:
last_name, first_name, username = parse_name(user_raw["name"])
user = User.objects.create(username=username)
user.last_name = last_name
user.first_name = first_name
user.set_password(username)
if user_raw in students_raw:
students.append(user)
else:
teachers.append(user)
save_all(students)
save_all(teachers)
print("Create tasks {}".format(tasks_raw))
tasks = [Task.objects.create(title=task["title"],
course=courses[task["course"]])
for task in tasks_raw]
save_all(tasks)
print("Create issues {}".format(issues_raw))
issues = [Issue.objects.create(student=students[issue["student"]],
task=tasks[issue["task"]])
for issue in issues_raw]
save_all(issues)
# Bind objects
print("Bind schools and courses")
for school_id, school in enumerate(schools_raw):
for course_id in school["courses"]:
schools[school_id].courses.add(courses[course_id])
print("Bind courses and groups")
for course_id, course in enumerate(courses_raw):
for group_id in course["groups"]:
courses[course_id].groups.add(groups[group_id])
print("Bind students and groups")
for student_id, student in enumerate(students_raw):
user = students[student_id]
group = groups[student["group"]]
group.students.add(user)
print("Set teachers")
for teacher_id, teacher in enumerate(teachers_raw):
user = teachers[teacher_id]
for course_id in teacher["courses"]:
course = courses[course_id]
course.teachers.add(user)
print("Bind tasks with courses")
for task_id, task in enumerate(tasks_raw):
course = courses[task["course"]]
task = tasks[task_id]
course.task_set.add(task)
print("Bind tasks with groups")
for task_id, task in enumerate(tasks_raw):
task = tasks[task_id]
for group_id in tasks_raw[task_id]["groups"]:
group = groups[group_id]
relation = TaskGroupRelations.objects.create(
task=task, group=group)
relation.save()
print("Completed creating test data.")
|
the-stack_106_30430 | import binascii
from binascii import Error
import json
import pprint
from iroha import block_pb2
import iroha.primitive_pb2 as iroha_primitive
import iroha.queries_pb2 as queries_pb2
from google.protobuf.json_format import MessageToDict, MessageToJson, ParseDict
from iroha import Iroha, IrohaGrpc
from iroha import IrohaCrypto
from .commons import genesis_block
class IrohaUtils:
"""
Iroha helper utilities
"""
def __init__(self):
self.ic = IrohaCrypto
self.iroha = Iroha("admin@test")
def save_keys_to_file(self, account_id):
private_key = self.ic.private_key()
public_key = self.ic.derive_public_key(private_key)
try:
with open(f"{account_id}.priv", "wb+") as private_key_file:
private_key_file.write(private_key)
with open(f"{account_id}.priv", "wb+") as public_key_file:
public_key_file.write(public_key)
return True
except Error as error:
return error
def generate_keypair(self):
private_key = self.ic.private_key()
public_key = str(self.ic.derive_public_key(private_key), "utf-8")
private_key = str(private_key, "utf-8")
key_pair = {"public_key": f"{public_key}", "private_key": f"{private_key}"}
return json.dumps(key_pair, indent=4)
def genesis_tx(self, users, roles, peers, domains, admin_private_key):
genesis_block_unsigned = genesis_block(users, roles, peers, domains)
private_key = bytes(admin_private_key, "utf-8")
print(private_key)
# init_tx = self.iroha.transaction(genesis_block_unsigned)
# tx = self.ic.sign_transaction(init_tx,private_key)
result = {}
# dict_tx = MessageToDict(genesis_block_unsigned)
# block = block_pb2.Block_v1.Payload = dict_tx
return genesis_block_unsigned
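# Illustrative usage (not part of the original module):
#
#     utils = IrohaUtils()
#     print(utils.generate_keypair())   # JSON string with hex-encoded public/private keys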
|
the-stack_106_30434 | import numpy as np
def calc_radius(width, height):
""" Calculate circumscribed circle radius. """
return np.sqrt(width**2 + height**2)/2
def array_round(array):
""" Round numpy array and convert it to dtype=int """
return np.rint(array).astype(int)
def circle_points(angle, num, radius):
""" Put equidistant points on a circle. """
sample = np.array([0, -radius])
span = np.deg2rad(angle)/2
points = []
    # rotate the reference point to place equidistant detector positions on the circle
for alpha in np.linspace(-span, span, num=num):
rot_mat = np.array([
[np.cos(alpha), np.sin(alpha)],
[-np.sin(alpha), np.cos(alpha)]
])
points.append(sample.dot(rot_mat))
points = np.array(points)
return array_round(points)
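# Illustrative check (hypothetical detector geometry; not part of the original module):
#
#     >>> circle_points(angle=90, num=3, radius=10)
#     array([[ -7,  -7],
#            [  0, -10],
#            [  7,  -7]])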
|
the-stack_106_30435 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import six
from sentry.testutils import CliTestCase
from sentry.runner.commands.init import init
class InitTest(CliTestCase):
command = init
def test_simple(self):
with self.runner.isolated_filesystem():
rv = self.invoke('config')
assert rv.exit_code == 0, rv.output
contents = os.listdir('config')
assert set(contents) == {'sentry.conf.py', 'config.yml'}
# Make sure the python file is valid
ctx = {'__file__': 'sentry.conf.py'}
with open('config/sentry.conf.py') as fp:
six.exec_(fp.read(), ctx)
assert 'DEBUG' in ctx
# Make sure the yaml file is valid
from sentry.utils.yaml import safe_load
with open('config/config.yml', 'rb') as fp:
ctx = safe_load(fp)
assert 'system.secret-key' in ctx
def test_no_directory(self):
rv = self.invoke('sentry.conf.py')
assert rv.exit_code != 0, rv.output
|
the-stack_106_30437 | import unittest
from conans.client.generators.text import TXTGenerator
from conans.test.utils.tools import TestServer, TestClient
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.model.ref import ConanFileReference
from nose.plugins.attrib import attr
from conans.util.files import load
import os
from conans.paths import BUILD_INFO, CONANFILE, BUILD_INFO_CMAKE
import platform
from conans.test.utils.test_files import wait_until_removed
@attr("slow")
class DiamondTest(unittest.TestCase):
def setUp(self):
test_server = TestServer(
[], # write permissions
users={"lasote": "mypass"}) # exported users and passwords
servers = {"default": test_server}
conan = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
self.diamond_tester = DiamondTester(self, conan, servers)
def diamond_cmake_test(self):
self.diamond_tester.test(use_cmake=True)
def diamond_cmake_targets_test(self):
self.diamond_tester.test(use_cmake=True, cmake_targets=True)
def diamond_default_test(self):
self.diamond_tester.test(use_cmake=False)
class DiamondTester(object):
def __init__(self, test_obj, conan, servers):
self.test_obj = test_obj
self.conan = conan
self.servers = servers
def _export_upload(self, name, version=None, deps=None, use_cmake=True, cmake_targets=False):
files = cpp_hello_conan_files(name, version, deps, need_patch=True, use_cmake=use_cmake,
cmake_targets=cmake_targets)
conan_ref = ConanFileReference(name, version, "lasote", "stable")
self.conan.save(files, clean_first=True)
self.conan.run("export . lasote/stable")
self.conan.run("upload %s" % str(conan_ref))
def _check_individual_deps(self, client):
self.test_obj.assertIn("INCLUDE [", client.user_io.out)
self.test_obj.assertIn(".conan/data/Hello0/0.1/lasote/stable", client.user_io.out)
build_file = os.path.join(client.current_folder, BUILD_INFO)
content = load(build_file)
cmakebuildinfo = load(os.path.join(client.current_folder, BUILD_INFO_CMAKE))
self.test_obj.assertIn("set(CONAN_LIBS helloHello3 helloHello1 helloHello2 helloHello0",
cmakebuildinfo)
self.test_obj.assertIn("set(CONAN_DEPENDENCIES Hello3 Hello1 Hello2 Hello0)", cmakebuildinfo)
deps_cpp_info, _, _ = TXTGenerator.loads(content)
self.test_obj.assertEqual(len(deps_cpp_info.include_paths), 4)
for dep in ("Hello3", "Hello2", "Hello1", "Hello0"):
self.test_obj.assertEqual(len(deps_cpp_info[dep].include_paths), 1)
self.test_obj.assertEqual(len(deps_cpp_info[dep].lib_paths), 1)
self.test_obj.assertEqual(deps_cpp_info[dep].libs, ["hello%s" % dep])
build_file = os.path.join(client.current_folder, BUILD_INFO_CMAKE)
content = load(build_file)
for dep in ("Hello3", "Hello2", "Hello1", "Hello0"):
self.test_obj.assertEqual(len(deps_cpp_info[dep].include_paths), 1)
self.test_obj.assertIn("set(CONAN_INCLUDE_DIRS_%s " % dep.upper(), content)
self.test_obj.assertIn("set(CONAN_LIBS_%s hello%s)" % (dep.upper(), dep), content)
def test(self, install=None, use_cmake=True, cmake_targets=False):
install = install or "install ."
if not use_cmake and platform.system() == "SunOS":
return # If is using sun-cc the gcc generator doesn't work
self._export_upload("Hello0", "0.1", use_cmake=use_cmake, cmake_targets=cmake_targets)
self._export_upload("Hello1", "0.1", ["Hello0/0.1@lasote/stable"], use_cmake=use_cmake,
cmake_targets=cmake_targets)
self._export_upload("Hello2", "0.1", ["Hello0/0.1@lasote/stable"], use_cmake=use_cmake,
cmake_targets=cmake_targets)
self._export_upload("Hello3", "0.1", ["Hello1/0.1@lasote/stable",
"Hello2/0.1@lasote/stable"], use_cmake=use_cmake,
cmake_targets=cmake_targets)
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]}, path_with_spaces=use_cmake)
files3 = cpp_hello_conan_files("Hello4", "0.1", ["Hello3/0.1@lasote/stable"],
use_cmake=use_cmake, cmake_targets=cmake_targets)
# Add some stuff to base project conanfile to test further the individual
# flags in build_info (txt, cmake) files
content = files3[CONANFILE]
content = content.replace("generators =", 'generators = "txt",')
content = content.replace("def build(self):",
"def build(self):\n"
" self.output.info('INCLUDE %s' "
"% self.deps_cpp_info['Hello0'].include_paths)")
files3[CONANFILE] = content
client.save(files3)
client.run("%s --build missing" % install)
if use_cmake:
if cmake_targets:
self.test_obj.assertIn("Conan: Using cmake targets configuration", client.user_io.out)
self.test_obj.assertNotIn("Conan: Using cmake global configuration", client.user_io.out)
else:
self.test_obj.assertIn("Conan: Using cmake global configuration", client.user_io.out)
self.test_obj.assertNotIn("Conan: Using cmake targets configuration", client.user_io.out)
client.run("build .")
self._check_individual_deps(client)
command = os.sep.join([".", "bin", "say_hello"])
client.runner(command, cwd=client.current_folder)
self.test_obj.assertEqual(['Hello Hello4', 'Hello Hello3', 'Hello Hello1', 'Hello Hello0',
'Hello Hello2', 'Hello Hello0'],
str(client.user_io.out).splitlines()[-6:])
files3 = cpp_hello_conan_files("Hello4", "0.1", ["Hello3/0.1@lasote/stable"], language=1,
use_cmake=use_cmake, cmake_targets=cmake_targets)
files3[CONANFILE] = files3[CONANFILE].replace("generators =", 'generators = "txt",')
wait_until_removed(client.current_folder)
client.save(files3)
client.run("%s --build missing" % install)
client.run("build .")
client.runner(command, cwd=client.current_folder)
self.test_obj.assertEqual(['Hola Hello4', 'Hola Hello3', 'Hola Hello1', 'Hola Hello0',
'Hola Hello2', 'Hola Hello0'],
str(client.user_io.out).splitlines()[-6:])
# Try to upload and reuse the binaries
client.run("upload Hello3/0.1@lasote/stable --all")
self.test_obj.assertEqual(str(client.user_io.out).count("Uploading package"), 2)
client.run("upload Hello1/0.1@lasote/stable --all")
self.test_obj.assertEqual(str(client.user_io.out).count("Uploading package"), 2)
client.run("upload Hello2/0.1@lasote/stable --all")
self.test_obj.assertEqual(str(client.user_io.out).count("Uploading package"), 2)
client.run("upload Hello0/0.1@lasote/stable --all")
self.test_obj.assertEqual(str(client.user_io.out).count("Uploading package"), 2)
client2 = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]}, path_with_spaces=use_cmake)
files3 = cpp_hello_conan_files("Hello4", "0.1", ["Hello3/0.1@lasote/stable"],
use_cmake=use_cmake, cmake_targets=cmake_targets)
files3[CONANFILE] = files3[CONANFILE].replace("generators =", 'generators = "txt",')
client2.save(files3)
client2.run("%s --build missing" % install)
client2.run("build .")
self.test_obj.assertNotIn("libhello0.a", client2.user_io.out)
self.test_obj.assertNotIn("libhello1.a", client2.user_io.out)
self.test_obj.assertNotIn("libhello2.a", client2.user_io.out)
self.test_obj.assertNotIn("libhello3.a", client2.user_io.out)
client2.runner(command, cwd=client2.current_folder)
self.test_obj.assertEqual(['Hello Hello4', 'Hello Hello3', 'Hello Hello1', 'Hello Hello0',
'Hello Hello2', 'Hello Hello0'],
str(client2.user_io.out).splitlines()[-6:])
files3 = cpp_hello_conan_files("Hello4", "0.1", ["Hello3/0.1@lasote/stable"], language=1,
use_cmake=use_cmake, cmake_targets=cmake_targets)
files3[CONANFILE] = files3[CONANFILE].replace("generators =", 'generators = "txt",')
wait_until_removed(client2.current_folder)
client2.save(files3)
client2.run("%s --build missing" % install)
client2.run("build .")
self.test_obj.assertNotIn("libhello0.a", client2.user_io.out)
self.test_obj.assertNotIn("libhello1.a", client2.user_io.out)
self.test_obj.assertNotIn("libhello2.a", client2.user_io.out)
self.test_obj.assertNotIn("libhello3.a", client2.user_io.out)
client2.runner(command, cwd=client2.current_folder)
self.test_obj.assertEqual(['Hola Hello4', 'Hola Hello3', 'Hola Hello1', 'Hola Hello0',
'Hola Hello2', 'Hola Hello0'],
str(client2.user_io.out).splitlines()[-6:])
|
the-stack_106_30438 | from flask import Blueprint, render_template, request, redirect, url_for, flash, session, send_file, abort
from wtforms import Form, StringField, validators, DateTimeField, BooleanField, IntegerField, DateField, SubmitField, FileField, SelectField,TextAreaField,HiddenField
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from src.repos.repo import cRepo
from src.appConfig import getAppConfig
import os
import secrets
from flask_login import login_required,current_user
from src.security.decorators import roles_required
import werkzeug
from src.app.editDocUploadViaForm import editDocUploadViaForm
from src.app.createDocUploadEditForm import createDocUploadEditForm
class docUploadForm(FlaskForm):
regulationName = StringField('Regulation Name',
validators=[DataRequired(), Length(min=2, max=250)])
type = SelectField('Type', choices=[('Regulation', 'Regulation'), ('SOR', 'SOR'), ('Corrigendum', 'Corrigendum')],
validators=[DataRequired()])
amendmentNo = IntegerField('Amendment No', validators=[DataRequired()])
# confirm_password = PasswordField('Confirm Password',
# validators=[DataRequired(), EqualTo('password')])
notificationDate = DateField('Notification Date',
validators=[DataRequired()])
effectiveDate = DateField('Effective Date',
validators=[DataRequired()])
repealDate = DateField('Repeal Date',
validators=[DataRequired()])
keyWordsByAdmin = StringField('Key Words by Admin',
validators=[DataRequired()])
docRefNo = IntegerField('Doc Ref No', validators=[DataRequired()])
uploadPDFFile = FileField('Upload PDF File', validators=[
FileAllowed(['pdf'])])
linkToCERCSitePDF = StringField('Link to CERC Site PDF',
validators=[DataRequired()])
submit = SubmitField('Submit')
class updateKeywordForm(FlaskForm):
keywords_user = TextAreaField('Keywords By User',
validators=[DataRequired(), Length(min=2, max=250)])
docid=HiddenField('DocId')
kid=HiddenField('Kid')
submit = SubmitField('Submit')
# def validate_username(self, username):
# user = User.query.filter_by(username=username.data).first()
# if user:
# raise ValidationError('That username is taken. Please choose a different one.')
# def validate_email(self, email):
# user = User.query.filter_by(email=email.data).first()
# if user:
# raise ValidationError('That email is taken. Please choose a different one.')
docsPage = Blueprint('docs', __name__,
template_folder='templates')
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
    # in Python an unused variable can be named _ to discard it; splitext splits the filename from its extension
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
    # os.path.join concatenates the upload folder and the generated file name
# form_pdf=secure_filename(picture_fn)
appConf = getAppConfig()
print(appConf['upload_folder'])
picture_path = os.path.join(appConf['upload_folder'], picture_fn)
form_picture.save(picture_path)
return picture_fn
@docsPage.route("/fileUpload", methods=['GET', 'POST'])
@login_required
@roles_required(["a"])
def fileUpload():
form = docUploadForm()
if form.validate_on_submit():
if form.uploadPDFFile.data:
fileName = save_picture(form.uploadPDFFile.data)
appConf = getAppConfig()
cRepo_init = cRepo(appConf['appDbConnStr'])
isInsertSuccess = cRepo_init.insertFileDetails(regulationName=form.regulationName.data, type=form.type.data, amendmentNo=form.amendmentNo.data,
notificationDate=form.notificationDate.data, effectiveDate=form.effectiveDate.data, repealDate=form.repealDate.data, keyWordsByAdmin=form.keyWordsByAdmin.data,
docRefNo=form.docRefNo.data, uploadPDFFile=fileName, linkToCERCSitePDF=form.linkToCERCSitePDF.data)
if isInsertSuccess:
flash('Your Document uploaded successfully!', 'success')
return redirect(url_for('docs.list'))
else:
flash('Document uploading failed!', 'danger')
return render_template('docUpload.html.j2', title='Upload Doc', form=form)
@docsPage.route('/download', defaults={'req_path': ''})
@docsPage.route('/download/<path:req_path>')
@login_required
@roles_required(["a"])
def downloadDocument(req_path):
appConf = getAppConfig()
BASE_DIR = appConf['upload_folder']
# Joining the base and the requested path
abs_path = os.path.join(BASE_DIR, req_path)
# Return 404 if path doesn't exist
if not os.path.exists(abs_path):
return abort(404)
# Check if path is a file and serve
if os.path.isfile(abs_path):
return send_file(abs_path)
else:
return abort(404)
@docsPage.route('/list', methods=['GET'])
@login_required
@roles_required(['a','b'])
def list():
form=updateKeywordForm()
appConf = getAppConfig()
cRepo_init = cRepo(appConf['appDbConnStr'])
#print(current_user.name,current_user.roles)
    if current_user.roles == 'a':
        docDetails = cRepo_init.getList()
    else:
        docDetails = cRepo_init.getListForUser(current_user.id)
return render_template('list.html.j2', data={'docDetails': docDetails},form=form)
@docsPage.route('/delete/<docId>', methods=['GET', 'POST'])
@login_required
@roles_required(['a'])
def delete(docId: int):
appConf = getAppConfig()
cRepo_init = cRepo(appConf['appDbConnStr'])
doc = cRepo_init.getDocById(docId)
    if doc is None:
raise werkzeug.exceptions.NotFound()
if request.method == 'POST':
isSuccess = cRepo_init.deleteDoc(docId)
if isSuccess:
flash('Successfully deleted the document', category='success')
return redirect(url_for('docs.list'))
else:
flash('Could not delete the document', category='error')
return render_template('delete.html.j2', data={'doc': doc})
@docsPage.route('/edit/<docId>', methods=['GET', 'POST'])
@login_required
@roles_required(['a'])
def edit(docId: int):
appConf = getAppConfig()
cRepo_init = cRepo(appConf['appDbConnStr'])
doc = cRepo_init.getDocById(docId)
    if doc is None:
raise werkzeug.exceptions.NotFound()
form = docUploadForm()
if request.method == 'POST':
if form.validate_on_submit():
            fileName = None
            if form.uploadPDFFile.data:
                fileName = save_picture(form.uploadPDFFile.data)
            isSuccess = editDocUploadViaForm(docId=docId, cRepo=cRepo_init, form=form, doc=doc, fileName=fileName)
if isSuccess:
flash('Successfully edited document details ', category='success')
else:
flash('Could not edit the document details ', category='danger')
return redirect(url_for('docs.list'))
else:
form = createDocUploadEditForm(doc,form)
return render_template('editDocUploadForm.html.j2', form=form)
@docsPage.route('/update', methods=[ 'POST'])
@login_required
@roles_required(['b'])
def updateUKeyword():
appConf = getAppConfig()
cRepo_init = cRepo(appConf['appDbConnStr'])
    form = updateKeywordForm()
    isSuccess = cRepo_init.updateUserKeyword(keywords_user=form.keywords_user.data, docid=form.docid.data, kid=form.kid.data, userid=current_user.id)
    if isSuccess:
        flash('Keyword is updated successfully', category='success')
    else:
        flash('Keyword update failed', category='danger')
    return redirect(url_for('docs.list'))
the-stack_106_30439 | class User():
def __init__(self, nome, apelido, nome_do_usuario, email,
localidade, idade, sexo):
self.nome = nome
self.apelido = apelido
self.nome_do_usuario = nome_do_usuario
self.email = email
self.localidade = localidade
self.idade = idade
self.sexo = sexo
self.nome_completo = f'{self.nome} {self.apelido}'
self.tentativas = 0
def descricao_do_usuario(self):
print(f'Nome do usuario: {self.nome_do_usuario}')
print(f'\tNome completo: {self.nome_completo}')
print(f'\tE-mail: {self.email}')
print(f'\tLocalidade: {self.localidade}')
print(f'\tIdade: {self.idade}')
print(f'\tSexo: {self.sexo}')
def saudacao(self):
print(f'\nSeja bem vindo, {self.nome_do_usuario}')
class Privileges():
def __init__(self):
        self.privilegios = ['Pode acessar informacoes dos usuarios',
'Pode adicionar um post',
'Pode banir um usuario.',
'Pode deletar um post.',
'Pode desbanir um usuario.']
def mostra_previlegios(self):
print('\nPrivilegios do Admin:')
for vantagem in self.privilegios:
print(f'\t- {vantagem}')
class Admin(User):
def __init__(self, nome, apelido, nome_do_usuario, email,
localidade, idade, sexo):
super().__init__(nome, apelido, nome_do_usuario, email,
localidade, idade, sexo)
self.privilegios = Privileges()
inicia = Admin('William','Rodrigues','Admin',
'[email protected]','Maputo',
19,'M')
inicia.descricao_do_usuario()
inicia.saudacao()
inicia.privilegios.mostra_previlegios()
the-stack_106_30441 | import numpy as np
import pytest
import pandas as pd
from pandas import Int64Index, TimedeltaIndex, timedelta_range
import pandas._testing as tm
from pandas.tseries.offsets import Hour
class TestTimedeltaIndex:
def test_union(self):
i1 = timedelta_range("1day", periods=5)
i2 = timedelta_range("3day", periods=5)
result = i1.union(i2)
expected = timedelta_range("1day", periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = timedelta_range(start="1 day", periods=10, freq="D")
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_sort_false(self):
tdi = timedelta_range("1day", periods=5)
left = tdi[3:]
right = tdi[:3]
# Check that we are testing the desired code path
assert left._can_fast_union(right)
result = left.union(right)
tm.assert_index_equal(result, tdi)
result = left.union(right, sort=False)
expected = TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
tm.assert_index_equal(result, expected)
def test_union_coverage(self):
idx = TimedeltaIndex(["3d", "1d", "2d"])
ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range("1 day", periods=4, freq="3H")
rng_b = timedelta_range("1 day", periods=4, freq="4H")
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(["1 day 15:19:49.695000"])
right = TimedeltaIndex(
["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_freq_infer(self):
# When taking the union of two TimedeltaIndexes, we infer
# a freq even if the arguments don't have freq. This matches
# DatetimeIndex behavior.
tdi = timedelta_range("1 Day", periods=5)
left = tdi[[0, 1, 3, 4]]
right = tdi[[2, 3, 1]]
assert left.freq is None
assert right.freq is None
result = left.union(right)
tm.assert_index_equal(result, tdi)
assert result.freq == "D"
def test_intersection_bug_1708(self):
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(5)
with tm.assert_produces_warning(FutureWarning):
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(1)
with tm.assert_produces_warning(FutureWarning):
result = index_1 & index_2
expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_intersection_equal(self, sort):
# GH 24471 Test intersection outcome given the sort keyword
        # for equal indices the intersection should return the original index
first = timedelta_range("1 day", periods=4, freq="h")
second = timedelta_range("1 day", periods=4, freq="h")
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
def test_intersection_zero_length(self, period_1, period_2, sort):
# GH 24471 test for non overlap the intersection should be zero length
index_1 = timedelta_range("1 day", periods=period_1, freq="h")
index_2 = timedelta_range("1 day", periods=period_2, freq="h")
expected = timedelta_range("1 day", periods=0, freq="h")
result = index_1.intersection(index_2, sort=sort)
tm.assert_index_equal(result, expected)
def test_zero_length_input_index(self, sort):
# GH 24966 test for 0-len intersections are copied
index_1 = timedelta_range("1 day", periods=0, freq="h")
index_2 = timedelta_range("1 day", periods=3, freq="h")
result = index_1.intersection(index_2, sort=sort)
assert index_1 is not result
assert index_2 is not result
tm.assert_copy(result, index_1)
@pytest.mark.parametrize(
"rng, expected",
# if target has the same name, it is preserved
[
(
timedelta_range("1 day", periods=5, freq="h", name="idx"),
timedelta_range("1 day", periods=4, freq="h", name="idx"),
),
# if target name is different, it will be reset
(
timedelta_range("1 day", periods=5, freq="h", name="other"),
timedelta_range("1 day", periods=4, freq="h", name=None),
),
# if no overlap exists return empty index
(
timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
TimedeltaIndex([], freq="h", name="idx"),
),
],
)
def test_intersection(self, rng, expected, sort):
# GH 4690 (with tz)
base = timedelta_range("1 day", periods=4, freq="h", name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
@pytest.mark.parametrize(
"rng, expected",
# part intersection works
[
(
TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
),
# reordered part intersection
(
TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
TimedeltaIndex(["1 hour", "2 hour"], name=None),
),
# reversed index
(
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
::-1
],
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
),
],
)
def test_intersection_non_monotonic(self, rng, expected, sort):
# 24471 non-monotonic
base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
# if reversed order, frequency is still the same
if all(base == rng[::-1]) and sort is None:
assert isinstance(result.freq, Hour)
else:
assert result.freq is None
class TestTimedeltaIndexDifference:
def test_difference_freq(self, sort):
# GH14323: Difference of TimedeltaIndex should not preserve frequency
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other, sort)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
def test_difference_sort(self, sort):
index = TimedeltaIndex(
["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"]
)
other = timedelta_range("1 days", "4 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["5 days", "0 days"], freq=None)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["1 days", "0 days"], freq=None)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
the-stack_106_30442 | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
model = __import__('15-model').model
def one_hot(Y, classes):
"""convert an array to a one-hot matrix"""
oh = np.zeros((Y.shape[0], classes))
oh[np.arange(Y.shape[0]), Y] = 1
return oh
if __name__ == '__main__':
lib= np.load('../data/MNIST.npz')
X_train_3D = lib['X_train']
Y_train = lib['Y_train']
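    # Flatten each image into a single 1-D feature vector per sample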
X_train = X_train_3D.reshape((X_train_3D.shape[0], -1))
Y_train_oh = one_hot(Y_train, 10)
X_valid_3D = lib['X_valid']
Y_valid = lib['Y_valid']
X_valid = X_valid_3D.reshape((X_valid_3D.shape[0], -1))
Y_valid_oh = one_hot(Y_valid, 10)
layer_sizes = [256, 256, 10]
activations = [tf.nn.tanh, tf.nn.tanh, None]
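    # Fix the NumPy and TensorFlow seeds so weight initialisation and results are reproducible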
np.random.seed(0)
tf.set_random_seed(0)
save_path = model((X_train, Y_train_oh), (X_valid, Y_valid_oh), layer_sizes,
activations, save_path='./model.ckpt')
    print('Model saved in path: {}'.format(save_path))
the-stack_106_30447 | import os
import numpy as np
import pandas as pd
import argparse
import subprocess
from pathlib import Path
from itertools import product
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def get_args_from_command_line():
"""Parse the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--country_code", type=str, default='US')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args_from_command_line()
# Load tweets
path_to_tweets = os.path.join(
'/scratch/mt4493/twitter_labor/twitter-labor-data/data/random_samples/random_samples_splitted',
args.country_code,
'evaluation') # Random set of tweets
tweets = pd.concat([pd.read_parquet(path) for path in Path(path_to_tweets).glob('*.parquet')])
tweets = tweets[['tweet_id', 'text']]
# model_folder_list = ['iter_0-convbert-969622-evaluation', 'iter_1-convbert-3050798-evaluation',
# 'iter_2-convbert-3134867-evaluation',
# 'iter_3-convbert-3174249-evaluation', 'iter_4-convbert-3297962-evaluation',
# 'iter_0-convbert-969622-evaluation',
# 'iter_1-convbert_adaptive-5612019-evaluation', 'iter_2-convbert_adaptive-5972342-evaluation',
# 'iter_3-convbert_adaptive-5998181-evaluation', 'iter_4-convbert_adaptive-6057405-evaluation']
# model_folder_list = ['iter_1-convbert_uncertainty-6200469-evaluation',
# 'iter_2-convbert_uncertainty-6253253-evaluation',
# 'iter_3-convbert_uncertainty-6318280-evaluation',
# ]
# model_folder_list = ['iter_4-convbert_uncertainty-6423646-evaluation']
# model_folder_list = ['iter_1-convbert_uncertainty_uncalibrated-6480837-evaluation',
# 'iter_2-convbert_uncertainty_uncalibrated-6578026-evaluation',
# 'iter_3-convbert_uncertainty_uncalibrated-6596620-evaluation']
# model_folder_list = ['iter_4-convbert_uncertainty_uncalibrated-6653849-evaluation']
model_folder_dict = {'MX': ['iter_0-beto-3201262-evaluation', 'iter_1-beto-3741011-evaluation',
'iter_2-beto-4141605-evaluation',
'iter_3-beto-4379208-evaluation', 'iter_4-beto-4608158-evaluation'],
'BR': ['iter_0-bertimbau-2877651-evaluation', 'iter_1-bertimbau-3774133-evaluation',
'iter_2-bertimbau-4180985-evaluation', 'iter_3-bertimbau-4518774-evaluation',
'iter_4-bertimbau-4688729-evaluation']}
model_folder_list = model_folder_dict[args.country_code]
for model_folder in model_folder_list:
logger.info(f'Folder: {model_folder}')
path_to_evals = os.path.join(
'/scratch/mt4493/twitter_labor/twitter-labor-data/data/active_learning/evaluation_inference',
args.country_code, model_folder) # Where to store the sampled tweets to be labeled
if not os.path.exists(path_to_evals):
os.makedirs(path_to_evals)
for label in ['is_hired_1mo', 'lost_job_1mo', 'is_unemployed', 'job_search', 'job_offer']:
logger.info(f'Class: {label}')
path_to_scores = os.path.join('/scratch/mt4493/twitter_labor/twitter-labor-data/data/inference',
args.country_code, model_folder, 'output',
label) # Prediction scores from classification
scores = pd.concat([pd.read_parquet(path) for path in Path(path_to_scores).glob('*.parquet')]).reset_index()
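            # Rank tweets by predicted score (rank 1 = highest) and keep ranks 21-50 as extra examples to label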
scores['rank'] = scores['score'].rank(method='first', ascending=False)
scores = scores[scores['rank'].between(21, 50)]
df = tweets.merge(scores, on=['tweet_id'])
df = df.sort_values(by=['rank'], ascending=True).reset_index(drop=True)
output_path = os.path.join(path_to_evals, f'extra_{label}.csv')
df.to_csv(output_path, index=False)
the-stack_106_30448 | import json
import sys
json_file = sys.argv[1]
jsons = []
jsons_parsed = []
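# Read the input file as JSON Lines: one JSON object per line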
with open(json_file) as j:
for line in j.readlines():
jsons.append(line.rstrip('\n'))
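# Parse each raw JSON string into a dictionary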
for obj in jsons:
jsons_parsed.append(json.loads(obj))
tweets = []
for obj in jsons_parsed:
text = obj['tweet']
tweets.append(text)
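# Write the extracted tweet texts to a file, one per line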
with open('tweets.txt', 'w') as output:
for tweet in tweets:
output.write(tweet + '\n')
the-stack_106_30449 | import json
import base64
import requests
from django.conf import settings
from future.moves.urllib.parse import urlencode
from qbosdk import UnauthorizedClientError, NotFoundClientError, WrongParamsError, InternalServerError
def generate_qbo_refresh_token(authorization_code: str) -> str:
"""
Generate QBO refresh token from authorization code
"""
api_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': settings.QBO_REDIRECT_URI
}
auth = '{0}:{1}'.format(settings.QBO_CLIENT_ID, settings.QBO_CLIENT_SECRET)
auth = base64.b64encode(auth.encode('utf-8'))
request_header = {
'Accept': 'application/json',
'Content-type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic {0}'.format(
str(auth.decode())
)
}
token_url = settings.QBO_TOKEN_URI
response = requests.post(url=token_url, data=urlencode(api_data), headers=request_header)
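    # Translate the token endpoint's HTTP status codes into the qbosdk exception types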
if response.status_code == 200:
return json.loads(response.text)['refresh_token']
elif response.status_code == 401:
raise UnauthorizedClientError('Wrong client secret or/and refresh token', response.text)
elif response.status_code == 404:
raise NotFoundClientError('Client ID doesn\'t exist', response.text)
elif response.status_code == 400:
raise WrongParamsError('Some of the parameters were wrong', response.text)
elif response.status_code == 500:
raise InternalServerError('Internal server error', response.text)
the-stack_106_30450 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2014, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Runs all netaddr unit tests."""
from os.path import abspath, basename, dirname, join as pathjoin
import sys
import glob
import doctest
import unittest
sys.path.insert(0, abspath(pathjoin(dirname(__file__), '..', '..')))
#-----------------------------------------------------------------------------
def test_suite_all():
test_dirs = [
'ip',
'eui',
'strategy',
'core'
]
base_path = abspath(pathjoin(dirname(__file__), '..'))
# Select tests based on the version of the Python interpreter.
py_ver_dir = '2.x'
if sys.version_info[0] == 3:
py_ver_dir = '3.x'
# Gather list of files containing tests.
test_files = []
for entry in test_dirs:
test_path = pathjoin(base_path, "tests", py_ver_dir, entry, "*.txt")
files = glob.glob(test_path)
test_files.extend(files)
sys.stdout.write('testdir: %s\n' % '\n'.join(test_files))
# Add anything to the skiplist that we want to leave out.
skiplist = []
# Drop platform specific tests for other platforms.
platform_tests = ['platform_darwin.txt', 'platform_linux2.txt', 'platform_win32.txt']
for platform_test in platform_tests:
if not sys.platform in platform_test:
skiplist.append(platform_test)
# Exclude any entries from the skip list.
test_files = [t for t in test_files if basename(t) not in skiplist]
# Build and return a complete unittest test suite.
suite = unittest.TestSuite()
for test_file in test_files:
doctest_suite = doctest.DocFileSuite(test_file,
optionflags=doctest.ELLIPSIS, module_relative=False)
suite.addTest(doctest_suite)
return suite
#-----------------------------------------------------------------------------
def run():
runner = unittest.TextTestRunner()
return runner.run(test_suite_all())
#-----------------------------------------------------------------------------
if __name__ == "__main__":
result = run()
sys.exit(not result.wasSuccessful())
the-stack_106_30454 | import json
import logging
from typing import Dict, Optional
from moto.iam.policy_validation import IAMPolicyDocumentValidator
from moto.secretsmanager import models as secretsmanager_models
from moto.secretsmanager.exceptions import SecretNotFoundException
from moto.secretsmanager.models import SecretsManagerBackend, secretsmanager_backends
from moto.secretsmanager.responses import SecretsManagerResponse
from localstack.aws.api import RequestContext, ServiceResponse, handler
from localstack.aws.api.secretsmanager import (
CancelRotateSecretRequest,
CancelRotateSecretResponse,
CreateSecretRequest,
CreateSecretResponse,
DeleteResourcePolicyRequest,
DeleteResourcePolicyResponse,
DeleteSecretRequest,
DeleteSecretResponse,
DescribeSecretRequest,
DescribeSecretResponse,
GetResourcePolicyRequest,
GetResourcePolicyResponse,
GetSecretValueRequest,
GetSecretValueResponse,
ListSecretVersionIdsRequest,
ListSecretVersionIdsResponse,
PutResourcePolicyRequest,
PutResourcePolicyResponse,
PutSecretValueRequest,
PutSecretValueResponse,
RemoveRegionsFromReplicationRequest,
RemoveRegionsFromReplicationResponse,
ReplicateSecretToRegionsRequest,
ReplicateSecretToRegionsResponse,
RestoreSecretRequest,
RestoreSecretResponse,
RotateSecretRequest,
RotateSecretResponse,
SecretsmanagerApi,
StopReplicationToReplicaRequest,
StopReplicationToReplicaResponse,
TagResourceRequest,
UntagResourceRequest,
UpdateSecretRequest,
UpdateSecretResponse,
UpdateSecretVersionStageRequest,
UpdateSecretVersionStageResponse,
ValidateResourcePolicyRequest,
ValidateResourcePolicyResponse,
)
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services.moto import call_moto, call_moto_with_request
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str
from localstack.utils.patch import patch
from localstack.utils.strings import short_uid
LOG = logging.getLogger(__name__)
# maps key names to ARNs
SECRET_ARN_STORAGE = {}
class SecretsmanagerProvider(SecretsmanagerApi):
def __init__(self):
super().__init__()
apply_patches()
@staticmethod
def _transform_context_secret_id(context: RequestContext) -> Optional[Dict]:
# If secret ARN ends with "-<randomId>" this is removed from the request for upstream compatibility.
data = json.loads(to_str(context.request.data or "{}"))
secret_id = data.get("SecretId", None)
if secret_id and ":" in secret_id:
arn = aws_stack.parse_arn(secret_id)
aws_region = aws_stack.get_region()
if arn["region"] != aws_region:
LOG.info(f'Expected request region "{aws_region}" for secret "{secret_id}"')
resource_id = arn["resource"].split(":")[-1]
if resource_id[-7] == "-":
data["SecretId"] = resource_id[:-7]
elif resource_id[-1] != "-":
data["SecretId"] += "-"
return data
return None
@staticmethod
def _call_moto_with_request_secret_id(context: RequestContext) -> ServiceResponse:
data_dict = SecretsmanagerProvider._transform_context_secret_id(context)
return call_moto_with_request(context, data_dict) if data_dict else call_moto(context)
@handler("CancelRotateSecret", expand=False)
def cancel_rotate_secret(
self, context: RequestContext, request: CancelRotateSecretRequest
) -> CancelRotateSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("CreateSecret", expand=False)
def create_secret(
self, context: RequestContext, request: CreateSecretRequest
) -> CreateSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("DeleteResourcePolicy", expand=False)
def delete_resource_policy(
self, context: RequestContext, request: DeleteResourcePolicyRequest
) -> DeleteResourcePolicyResponse:
return self._call_moto_with_request_secret_id(context)
@handler("DeleteSecret", expand=False)
def delete_secret(
self, context: RequestContext, request: DeleteSecretRequest
) -> DeleteSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("DescribeSecret", expand=False)
def describe_secret(
self, context: RequestContext, request: DescribeSecretRequest
) -> DescribeSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("GetResourcePolicy", expand=False)
def get_resource_policy(
self, context: RequestContext, request: GetResourcePolicyRequest
) -> GetResourcePolicyResponse:
return self._call_moto_with_request_secret_id(context)
@handler("GetSecretValue", expand=False)
def get_secret_value(
self, context: RequestContext, request: GetSecretValueRequest
) -> GetSecretValueResponse:
return self._call_moto_with_request_secret_id(context)
@handler("ListSecretVersionIds", expand=False)
def list_secret_version_ids(
self, context: RequestContext, request: ListSecretVersionIdsRequest
) -> ListSecretVersionIdsResponse:
return self._call_moto_with_request_secret_id(context)
@handler("PutResourcePolicy", expand=False)
def put_resource_policy(
self, context: RequestContext, request: PutResourcePolicyRequest
) -> PutResourcePolicyResponse:
return self._call_moto_with_request_secret_id(context)
@handler("PutSecretValue", expand=False)
def put_secret_value(
self, context: RequestContext, request: PutSecretValueRequest
) -> PutSecretValueResponse:
return self._call_moto_with_request_secret_id(context)
@handler("RemoveRegionsFromReplication", expand=False)
def remove_regions_from_replication(
self, context: RequestContext, request: RemoveRegionsFromReplicationRequest
) -> RemoveRegionsFromReplicationResponse:
return self._call_moto_with_request_secret_id(context)
@handler("ReplicateSecretToRegions", expand=False)
def replicate_secret_to_regions(
self, context: RequestContext, request: ReplicateSecretToRegionsRequest
) -> ReplicateSecretToRegionsResponse:
return self._call_moto_with_request_secret_id(context)
@handler("RestoreSecret", expand=False)
def restore_secret(
self, context: RequestContext, request: RestoreSecretRequest
) -> RestoreSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("RotateSecret", expand=False)
def rotate_secret(
self, context: RequestContext, request: RotateSecretRequest
) -> RotateSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("StopReplicationToReplica", expand=False)
def stop_replication_to_replica(
self, context: RequestContext, request: StopReplicationToReplicaRequest
) -> StopReplicationToReplicaResponse:
return self._call_moto_with_request_secret_id(context)
@handler("TagResource", expand=False)
def tag_resource(self, context: RequestContext, request: TagResourceRequest) -> None:
self._call_moto_with_request_secret_id(context)
@handler("UntagResource", expand=False)
def untag_resource(self, context: RequestContext, request: UntagResourceRequest) -> None:
self._call_moto_with_request_secret_id(context)
@handler("UpdateSecret", expand=False)
def update_secret(
self, context: RequestContext, request: UpdateSecretRequest
) -> UpdateSecretResponse:
return self._call_moto_with_request_secret_id(context)
@handler("UpdateSecretVersionStage", expand=False)
def update_secret_version_stage(
self, context: RequestContext, request: UpdateSecretVersionStageRequest
) -> UpdateSecretVersionStageResponse:
return self._call_moto_with_request_secret_id(context)
@handler("ValidateResourcePolicy", expand=False)
def validate_resource_policy(
self, context: RequestContext, request: ValidateResourcePolicyRequest
) -> ValidateResourcePolicyResponse:
return self._call_moto_with_request_secret_id(context)
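# Build one ARN per (region, secret_id) pair, caching it so repeated lookups return the same ARN.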
def secretsmanager_models_secret_arn(region, secret_id):
k = f"{region}_{secret_id}"
if k not in SECRET_ARN_STORAGE:
id_string = short_uid()[:6]
arn = aws_stack.secretsmanager_secret_arn(
secret_id, account_id=TEST_AWS_ACCOUNT_ID, region_name=region, random_suffix=id_string
)
SECRET_ARN_STORAGE[k] = arn
return SECRET_ARN_STORAGE[k]
# patching resource policy in moto
def get_resource_policy_model(self, secret_id):
if self._is_valid_identifier(secret_id):
result = {
"ARN": self.secrets[secret_id].arn,
"Name": self.secrets[secret_id].secret_id,
}
policy = getattr(self.secrets[secret_id], "policy", None)
if policy:
result["ResourcePolicy"] = json.dumps(policy)
return json.dumps(result)
else:
raise SecretNotFoundException()
def get_resource_policy_response(self):
secret_id = self._get_param("SecretId")
return secretsmanager_backends[self.region].get_resource_policy(secret_id=secret_id)
def delete_resource_policy_model(self, secret_id):
if self._is_valid_identifier(secret_id):
self.secrets[secret_id].policy = None
return json.dumps(
{
"ARN": self.secrets[secret_id].arn,
"Name": self.secrets[secret_id].secret_id,
}
)
else:
raise SecretNotFoundException()
def delete_resource_policy_response(self):
secret_id = self._get_param("SecretId")
return secretsmanager_backends[self.region].delete_resource_policy(secret_id=secret_id)
def put_resource_policy_model(self, secret_id, resource_policy):
policy_validator = IAMPolicyDocumentValidator(resource_policy)
policy_validator._validate_top_elements()
policy_validator._validate_version_syntax()
if self._is_valid_identifier(secret_id):
self.secrets[secret_id].policy = resource_policy
return json.dumps(
{
"ARN": self.secrets[secret_id].arn,
"Name": self.secrets[secret_id].secret_id,
}
)
else:
raise SecretNotFoundException()
def put_resource_policy_response(self):
secret_id = self._get_param("SecretId")
resource_policy = self._get_param("ResourcePolicy")
return secretsmanager_backends[self.region].put_resource_policy(
secret_id=secret_id, resource_policy=json.loads(resource_policy)
)
def apply_patches():
secretsmanager_models.secret_arn = secretsmanager_models_secret_arn
setattr(SecretsManagerBackend, "get_resource_policy", get_resource_policy_model)
setattr(SecretsManagerResponse, "get_resource_policy", get_resource_policy_response)
if not hasattr(SecretsManagerBackend, "delete_resource_policy"):
SecretsManagerBackend.delete_resource_policy = delete_resource_policy_model
if not hasattr(SecretsManagerResponse, "delete_resource_policy"):
SecretsManagerResponse.delete_resource_policy = delete_resource_policy_response
if not hasattr(SecretsManagerBackend, "put_resource_policy"):
SecretsManagerBackend.put_resource_policy = put_resource_policy_model
if not hasattr(SecretsManagerResponse, "put_resource_policy"):
SecretsManagerResponse.put_resource_policy = put_resource_policy_response
@patch(SecretsManagerBackend.rotate_secret)
def rotate_secret(fn, self, secret_id, rotation_lambda_arn=None, *args, **kwargs):
# make sure we're passing empty rotation Lambda ARN, to avoid ResourceNotFoundException in moto
# TODO - should think about adding an enhanced patch that calls Lambda functions from lambda_api.py
return fn(self, secret_id, rotation_lambda_arn=None, *args, **kwargs)
the-stack_106_30456 | """Support classes for automated testing.
* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
with additional support for testing asynchronous (`.IOLoop`-based) code.
* `ExpectLog`: Make test logs less spammy.
* `main()`: A simple test runner (wrapper around unittest.main()) with support
for the tornado.autoreload module to rerun the tests when code changes.
"""
from __future__ import absolute_import, division, print_function
try:
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.ioloop import IOLoop, TimeoutError
from tornado import netutil
from tornado.process import Subprocess
except ImportError:
# These modules are not importable on app engine. Parts of this module
# won't work, but e.g. main() will.
AsyncHTTPClient = None # type: ignore
gen = None # type: ignore
HTTPServer = None # type: ignore
IOLoop = None # type: ignore
netutil = None # type: ignore
SimpleAsyncHTTPClient = None # type: ignore
Subprocess = None # type: ignore
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import raise_exc_info, basestring_type, PY3
import functools
import inspect
import logging
import os
import re
import signal
import socket
import sys
try:
import asyncio
except ImportError:
asyncio = None
try:
from collections.abc import Generator as GeneratorType # type: ignore
except ImportError:
from types import GeneratorType # type: ignore
if sys.version_info >= (3, 5):
iscoroutine = inspect.iscoroutine # type: ignore
iscoroutinefunction = inspect.iscoroutinefunction # type: ignore
else:
iscoroutine = iscoroutinefunction = lambda f: False
# Tornado's own test suite requires the updated unittest module
# (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want
# to allow the older version if unittest2 is not available.
if PY3:
# On python 3, mixing unittest2 and unittest (including doctest)
# doesn't seem to work, so always use unittest.
import unittest
else:
# On python 2, prefer unittest2 when available.
try:
import unittest2 as unittest # type: ignore
except ImportError:
import unittest # type: ignore
if asyncio is None:
_NON_OWNED_IOLOOPS = ()
else:
import tornado.platform.asyncio
_NON_OWNED_IOLOOPS = tornado.platform.asyncio.AsyncIOMainLoop
def bind_unused_port(reuse_port=False):
"""Binds a server socket to an available port on localhost.
Returns a tuple (socket, port).
.. versionchanged:: 4.4
Always binds to ``127.0.0.1`` without resolving the name
``localhost``.
"""
sock = netutil.bind_sockets(None, '127.0.0.1', family=socket.AF_INET,
reuse_port=reuse_port)[0]
port = sock.getsockname()[1]
return sock, port
def get_async_test_timeout():
"""Get the global timeout setting for async tests.
Returns a float, the timeout in seconds.
.. versionadded:: 3.1
"""
try:
return float(os.environ.get('ASYNC_TEST_TIMEOUT'))
except (ValueError, TypeError):
return 5
class _TestMethodWrapper(object):
"""Wraps a test method to raise an error if it returns a value.
This is mainly used to detect undecorated generators (if a test
method yields it must use a decorator to consume the generator),
but will also detect other kinds of return values (these are not
necessarily errors, but we alert anyway since there is no good
reason to return a value from a test).
"""
def __init__(self, orig_method):
self.orig_method = orig_method
def __call__(self, *args, **kwargs):
result = self.orig_method(*args, **kwargs)
if isinstance(result, GeneratorType) or iscoroutine(result):
raise TypeError("Generator and coroutine test methods should be"
" decorated with tornado.testing.gen_test")
elif result is not None:
raise ValueError("Return value from test method ignored: %r" %
result)
def __getattr__(self, name):
"""Proxy all unknown attributes to the original method.
This is important for some of the decorators in the `unittest`
module, such as `unittest.skipIf`.
"""
return getattr(self.orig_method, name)
class AsyncTestCase(unittest.TestCase):
"""`~unittest.TestCase` subclass for testing `.IOLoop`-based
asynchronous code.
The unittest framework is synchronous, so the test must be
complete by the time the test method returns. This means that
asynchronous code cannot be used in quite the same way as usual.
To write test functions that use the same ``yield``-based patterns
used with the `tornado.gen` module, decorate your test methods
with `tornado.testing.gen_test` instead of
`tornado.gen.coroutine`. This class also provides the `stop()`
and `wait()` methods for a more manual style of testing. The test
method itself must call ``self.wait()``, and asynchronous
callbacks should call ``self.stop()`` to signal completion.
By default, a new `.IOLoop` is constructed for each test and is available
as ``self.io_loop``. If the code being tested requires a
global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.
The `.IOLoop`'s ``start`` and ``stop`` methods should not be
called directly. Instead, use `self.stop <stop>` and `self.wait
<wait>`. Arguments passed to ``self.stop`` are returned from
``self.wait``. It is possible to have multiple ``wait``/``stop``
cycles in the same test.
Example::
# This test uses coroutine style.
class MyTestCase(AsyncTestCase):
@tornado.testing.gen_test
def test_http_fetch(self):
client = AsyncHTTPClient()
response = yield client.fetch("http://www.tornadoweb.org")
# Test contents of response
self.assertIn("FriendFeed", response.body)
# This test uses argument passing between self.stop and self.wait.
class MyTestCase2(AsyncTestCase):
def test_http_fetch(self):
client = AsyncHTTPClient()
client.fetch("http://www.tornadoweb.org/", self.stop)
response = self.wait()
# Test contents of response
self.assertIn("FriendFeed", response.body)
# This test uses an explicit callback-based style.
class MyTestCase3(AsyncTestCase):
def test_http_fetch(self):
client = AsyncHTTPClient()
client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
self.wait()
def handle_fetch(self, response):
# Test contents of response (failures and exceptions here
# will cause self.wait() to throw an exception and end the
# test).
# Exceptions thrown here are magically propagated to
# self.wait() in test_http_fetch() via stack_context.
self.assertIn("FriendFeed", response.body)
self.stop()
"""
def __init__(self, methodName='runTest'):
super(AsyncTestCase, self).__init__(methodName)
self.__stopped = False
self.__running = False
self.__failure = None
self.__stop_args = None
self.__timeout = None
# It's easy to forget the @gen_test decorator, but if you do
# the test will silently be ignored because nothing will consume
# the generator. Replace the test method with a wrapper that will
# make sure it's not an undecorated generator.
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
def setUp(self):
super(AsyncTestCase, self).setUp()
self.io_loop = self.get_new_ioloop()
self.io_loop.make_current()
def tearDown(self):
# Clean up Subprocess, so it can be used again with a new ioloop.
Subprocess.uninitialize()
self.io_loop.clear_current()
if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
# Try to clean up any file descriptors left open in the ioloop.
# This avoids leaks, especially when tests are run repeatedly
# in the same process with autoreload (because curl does not
# set FD_CLOEXEC on its file descriptors)
self.io_loop.close(all_fds=True)
super(AsyncTestCase, self).tearDown()
# In case an exception escaped or the StackContext caught an exception
# when there wasn't a wait() to re-raise it, do so here.
# This is our last chance to raise an exception in a way that the
# unittest machinery understands.
self.__rethrow()
def get_new_ioloop(self):
"""Returns the `.IOLoop` to use for this test.
By default, a new `.IOLoop` is created for each test.
Subclasses may override this method to return
`.IOLoop.current()` if it is not appropriate to use a new
`.IOLoop` in each tests (for example, if there are global
singletons using the default `.IOLoop`) or if a per-test event
loop is being provided by another system (such as
``pytest-asyncio``).
"""
return IOLoop()
def _handle_exception(self, typ, value, tb):
if self.__failure is None:
self.__failure = (typ, value, tb)
else:
app_log.error("multiple unhandled exceptions in test",
exc_info=(typ, value, tb))
self.stop()
return True
def __rethrow(self):
if self.__failure is not None:
failure = self.__failure
self.__failure = None
raise_exc_info(failure)
def run(self, result=None):
with ExceptionStackContext(self._handle_exception):
super(AsyncTestCase, self).run(result)
# As a last resort, if an exception escaped super.run() and wasn't
# re-raised in tearDown, raise it here. This will cause the
# unittest run to fail messily, but that's better than silently
# ignoring an error.
self.__rethrow()
def stop(self, _arg=None, **kwargs):
"""Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
to return.
Keyword arguments or a single positional argument passed to `stop()` are
saved and will be returned by `wait()`.
"""
assert _arg is None or not kwargs
self.__stop_args = kwargs or _arg
if self.__running:
self.io_loop.stop()
self.__running = False
self.__stopped = True
def wait(self, condition=None, timeout=None):
"""Runs the `.IOLoop` until stop is called or timeout has passed.
In the event of a timeout, an exception will be thrown. The
default timeout is 5 seconds; it may be overridden with a
``timeout`` keyword argument or globally with the
``ASYNC_TEST_TIMEOUT`` environment variable.
If ``condition`` is not None, the `.IOLoop` will be restarted
after `stop()` until ``condition()`` returns true.
.. versionchanged:: 3.1
Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
"""
if timeout is None:
timeout = get_async_test_timeout()
if not self.__stopped:
if timeout:
def timeout_func():
try:
raise self.failureException(
'Async operation timed out after %s seconds' %
timeout)
except Exception:
self.__failure = sys.exc_info()
self.stop()
self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
timeout_func)
while True:
self.__running = True
self.io_loop.start()
if (self.__failure is not None or
condition is None or condition()):
break
if self.__timeout is not None:
self.io_loop.remove_timeout(self.__timeout)
self.__timeout = None
assert self.__stopped
self.__stopped = False
self.__rethrow()
result = self.__stop_args
self.__stop_args = None
return result
class AsyncHTTPTestCase(AsyncTestCase):
"""A test case that starts up an HTTP server.
Subclasses must override `get_app()`, which returns the
`tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
Tests will typically use the provided ``self.http_client`` to fetch
URLs from this server.
Example, assuming the "Hello, world" example from the user guide is in
``hello.py``::
import hello
class TestHelloApp(AsyncHTTPTestCase):
def get_app(self):
return hello.make_app()
def test_homepage(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, 'Hello, world')
That call to ``self.fetch()`` is equivalent to ::
self.http_client.fetch(self.get_url('/'), self.stop)
response = self.wait()
which illustrates how AsyncTestCase can turn an asynchronous operation,
like ``http_client.fetch()``, into a synchronous operation. If you need
to do other asynchronous operations in tests, you'll probably need to use
``stop()`` and ``wait()`` yourself.
"""
def setUp(self):
super(AsyncHTTPTestCase, self).setUp()
sock, port = bind_unused_port()
self.__port = port
self.http_client = self.get_http_client()
self._app = self.get_app()
self.http_server = self.get_http_server()
self.http_server.add_sockets([sock])
def get_http_client(self):
return AsyncHTTPClient()
def get_http_server(self):
return HTTPServer(self._app, **self.get_httpserver_options())
def get_app(self):
"""Should be overridden by subclasses to return a
`tornado.web.Application` or other `.HTTPServer` callback.
"""
raise NotImplementedError()
def fetch(self, path, raise_error=False, **kwargs):
"""Convenience method to synchronously fetch a URL.
The given path will be appended to the local server's host and
port. Any additional kwargs will be passed directly to
`.AsyncHTTPClient.fetch` (and so could be used to pass
``method="POST"``, ``body="..."``, etc).
If the path begins with http:// or https://, it will be treated as a
full URL and will be fetched as-is.
If ``raise_error`` is True, a `tornado.httpclient.HTTPError` will
be raised if the response code is not 200. This is the same behavior
as the ``raise_error`` argument to `.AsyncHTTPClient.fetch`, but
the default is False here (it's True in `.AsyncHTTPClient`) because
tests often need to deal with non-200 response codes.
.. versionchanged:: 5.0
Added support for absolute URLs.
.. versionchanged:: 5.1
Added the ``raise_error`` argument.
.. deprecated:: 5.1
This method currently turns any exception into an
`.HTTPResponse` with status code 599. In Tornado 6.0,
errors other than `tornado.httpclient.HTTPError` will be
passed through, and ``raise_error=False`` will only
suppress errors that would be raised due to non-200
response codes.
"""
if path.lower().startswith(('http://', 'https://')):
url = path
else:
url = self.get_url(path)
return self.io_loop.run_sync(
lambda: self.http_client.fetch(url, raise_error=raise_error, **kwargs),
timeout=get_async_test_timeout())
def get_httpserver_options(self):
"""May be overridden by subclasses to return additional
keyword arguments for the server.
"""
return {}
def get_http_port(self):
"""Returns the port used by the server.
A new port is chosen for each test.
"""
return self.__port
def get_protocol(self):
return 'http'
def get_url(self, path):
"""Returns an absolute url for the given path on the test server."""
return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
self.get_http_port(), path)
def tearDown(self):
self.http_server.stop()
self.io_loop.run_sync(self.http_server.close_all_connections,
timeout=get_async_test_timeout())
self.http_client.close()
super(AsyncHTTPTestCase, self).tearDown()
class AsyncHTTPSTestCase(AsyncHTTPTestCase):
"""A test case that starts an HTTPS server.
Interface is generally the same as `AsyncHTTPTestCase`.
"""
def get_http_client(self):
return AsyncHTTPClient(force_instance=True,
defaults=dict(validate_cert=False))
def get_httpserver_options(self):
return dict(ssl_options=self.get_ssl_options())
def get_ssl_options(self):
"""May be overridden by subclasses to select SSL options.
By default includes a self-signed testing certificate.
"""
# Testing keys were generated with:
# openssl req -new -keyout tornado/test/test.key \
# -out tornado/test/test.crt -nodes -days 3650 -x509
module_dir = os.path.dirname(__file__)
return dict(
certfile=os.path.join(module_dir, 'test', 'test.crt'),
keyfile=os.path.join(module_dir, 'test', 'test.key'))
def get_protocol(self):
return 'https'
def gen_test(func=None, timeout=None):
"""Testing equivalent of ``@gen.coroutine``, to be applied to test methods.
``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
already running. ``@gen_test`` should be applied to test methods
on subclasses of `AsyncTestCase`.
Example::
class MyTest(AsyncHTTPTestCase):
@gen_test
def test_something(self):
response = yield self.http_client.fetch(self.get_url('/'))
By default, ``@gen_test`` times out after 5 seconds. The timeout may be
overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
or for each test with the ``timeout`` keyword argument::
class MyTest(AsyncHTTPTestCase):
@gen_test(timeout=10)
def test_something_slow(self):
response = yield self.http_client.fetch(self.get_url('/'))
Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`,
`AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`. Use ``yield
self.http_client.fetch(self.get_url())`` as shown above instead.
.. versionadded:: 3.1
The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
variable.
.. versionchanged:: 4.0
The wrapper now passes along ``*args, **kwargs`` so it can be used
on functions with arguments.
"""
if timeout is None:
timeout = get_async_test_timeout()
def wrap(f):
# Stack up several decorators to allow us to access the generator
# object itself. In the innermost wrapper, we capture the generator
# and save it in an attribute of self. Next, we run the wrapped
# function through @gen.coroutine. Finally, the coroutine is
# wrapped again to make it synchronous with run_sync.
#
# This is a good case study arguing for either some sort of
# extensibility in the gen decorators or cancellation support.
@functools.wraps(f)
def pre_coroutine(self, *args, **kwargs):
result = f(self, *args, **kwargs)
if isinstance(result, GeneratorType) or iscoroutine(result):
self._test_generator = result
else:
self._test_generator = None
return result
if iscoroutinefunction(f):
coro = pre_coroutine
else:
coro = gen.coroutine(pre_coroutine)
@functools.wraps(coro)
def post_coroutine(self, *args, **kwargs):
try:
return self.io_loop.run_sync(
functools.partial(coro, self, *args, **kwargs),
timeout=timeout)
except TimeoutError as e:
# run_sync raises an error with an unhelpful traceback.
# If the underlying generator is still running, we can throw the
# exception back into it so the stack trace is replaced by the
# point where the test is stopped. The only reason the generator
# would not be running would be if it were cancelled, which means
# a native coroutine, so we can rely on the cr_running attribute.
if getattr(self._test_generator, 'cr_running', True):
self._test_generator.throw(e)
# In case the test contains an overly broad except
# clause, we may get back here.
# Coroutine was stopped or didn't raise a useful stack trace,
# so re-raise the original exception which is better than nothing.
raise
return post_coroutine
if func is not None:
# Used like:
# @gen_test
# def f(self):
# pass
return wrap(func)
else:
# Used like @gen_test(timeout=10)
return wrap
# Without this attribute, nosetests will try to run gen_test as a test
# anywhere it is imported.
gen_test.__test__ = False # type: ignore
class ExpectLog(logging.Filter):
"""Context manager to capture and suppress expected log output.
Useful to make tests of error conditions less noisy, while still
leaving unexpected log entries visible. *Not thread safe.*
The attribute ``logged_stack`` is set to true if any exception
stack trace was logged.
Usage::
with ExpectLog('tornado.application', "Uncaught exception"):
error_response = self.fetch("/some_page")
.. versionchanged:: 4.3
Added the ``logged_stack`` attribute.
"""
def __init__(self, logger, regex, required=True):
"""Constructs an ExpectLog context manager.
:param logger: Logger object (or name of logger) to watch. Pass
an empty string to watch the root logger.
:param regex: Regular expression to match. Any log entries on
the specified logger that match this regex will be suppressed.
:param required: If true, an exception will be raised if the end of
the ``with`` statement is reached without matching any log entries.
"""
if isinstance(logger, basestring_type):
logger = logging.getLogger(logger)
self.logger = logger
self.regex = re.compile(regex)
self.required = required
self.matched = False
self.logged_stack = False
def filter(self, record):
if record.exc_info:
self.logged_stack = True
message = record.getMessage()
if self.regex.match(message):
self.matched = True
return False
return True
def __enter__(self):
self.logger.addFilter(self)
return self
def __exit__(self, typ, value, tb):
self.logger.removeFilter(self)
if not typ and self.required and not self.matched:
raise Exception("did not get expected log message")
def main(**kwargs):
"""A simple test runner.
This test runner is essentially equivalent to `unittest.main` from
the standard library, but adds support for tornado-style option
parsing and log formatting. It is *not* necessary to use this
`main` function to run tests using `AsyncTestCase`; these tests
are self-contained and can run with any test runner.
The easiest way to run a test is via the command line::
python -m tornado.testing tornado.test.stack_context_test
See the standard library unittest module for ways in which tests can
be specified.
Projects with many tests may wish to define a test script like
``tornado/test/runtests.py``. This script should define a method
``all()`` which returns a test suite and then call
`tornado.testing.main()`. Note that even when a test script is
used, the ``all()`` test suite may be overridden by naming a
single test on the command line::
# Runs all tests
python -m tornado.test.runtests
# Runs one test
python -m tornado.test.runtests tornado.test.stack_context_test
Additional keyword arguments passed through to ``unittest.main()``.
For example, use ``tornado.testing.main(verbosity=2)``
to show many test details as they are run.
See http://docs.python.org/library/unittest.html#unittest.main
for full argument list.
.. versionchanged:: 5.0
This function produces no output of its own; only that produced
by the `unittest` module (Previously it would add a PASS or FAIL
log message).
"""
from tornado.options import define, options, parse_command_line
define('exception_on_interrupt', type=bool, default=True,
help=("If true (default), ctrl-c raises a KeyboardInterrupt "
"exception. This prints a stack trace but cannot interrupt "
"certain operations. If false, the process is more reliably "
"killed, but does not print a stack trace."))
# support the same options as unittest's command-line interface
define('verbose', type=bool)
define('quiet', type=bool)
define('failfast', type=bool)
define('catch', type=bool)
define('buffer', type=bool)
argv = [sys.argv[0]] + parse_command_line(sys.argv)
if not options.exception_on_interrupt:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if options.verbose is not None:
kwargs['verbosity'] = 2
if options.quiet is not None:
kwargs['verbosity'] = 0
if options.failfast is not None:
kwargs['failfast'] = True
if options.catch is not None:
kwargs['catchbreak'] = True
if options.buffer is not None:
kwargs['buffer'] = True
if __name__ == '__main__' and len(argv) == 1:
print("No tests specified", file=sys.stderr)
sys.exit(1)
# In order to be able to run tests by their fully-qualified name
# on the command line without importing all tests here,
# module must be set to None. Python 3.2's unittest.main ignores
# defaultTest if no module is given (it tries to do its own
# test discovery, which is incompatible with auto2to3), so don't
# set module if we're not asking for a specific test.
if len(argv) > 1:
unittest.main(module=None, argv=argv, **kwargs)
else:
unittest.main(defaultTest="all", argv=argv, **kwargs)
if __name__ == '__main__':
main()
the-stack_106_30460 | from flask import Flask
from flask_wtf.csrf import CSRFProtect
import os
app = Flask(__name__)
csrf = CSRFProtect(app)
@app.route("/")
def pagina_inicial():
return "Hello World - Nica - Entrega Fase 5"
if __name__ == '__main__':
port = os.getenv('PORT')
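    # PORT is read from the environment (typically set by the hosting platform); if unset, Flask falls back to its default port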
    app.run('0.0.0.0', port=port)
the-stack_106_30462 | import os
from functools import wraps
from contextlib import contextmanager
@contextmanager
def temp_chdir(directory):
'''Temporarily change to another directory, then change back.
Does nothing if the directory is empty.'''
cwd = os.getcwd()
if directory and directory != cwd:
os.chdir(directory)
try:
yield
finally:
if directory and directory != cwd:
os.chdir(cwd)
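# Example usage (hypothetical path):
#   with temp_chdir('/tmp/build'):
#       ...  # filesystem work here is relative to /tmp/build
#   # the original working directory is restored on exit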
def get_tag_index(cells, tag, end=False, strict=False):
'''Get the index of the first (or last) occurrence of a tag.'''
if isinstance(tag, str):
tag = (tag,)
try:
return (next(i for i, cell in enumerate(cells)
if all(t in cell['tags'] for t in tag))
if not end else
-next(i for i, cell in enumerate(cells[::-1])
if all(t in cell['tags'] for t in tag))
) or None
except StopIteration:
        assert not strict, 'Tag "{}" not found'.format(tag)
return None
def filter_blacklist(cells, blacklist=None, default_blacklist=None, include=None):
'''Filter out cells in both the class blacklist and the passed blacklist.
Arguments:
cells (list): list of cells to filter.
blacklist (str|tuple|False, optional) the tags to filter out.
If tuple, it will filter out each tag in the tuple as well as the
classwide blacklist.
If str, it is the same as passing `(str,)`
If False, it will disable blacklist filtering.
If None (default), it will only use the class blacklist.
default_blacklist (tuple|None): the classwide/default blacklist to be merged.
include (tuple|None): items to remove from the blacklist.
'''
if blacklist is False: # disable blacklist
blacklist = set()
else:
if isinstance(blacklist, str):
blacklist = {blacklist}
elif blacklist is None:
blacklist = set()
else:
blacklist = set(blacklist)
if default_blacklist:
blacklist |= default_blacklist # merge blacklist with defaults
if include:
blacklist -= set(include)
return [
cell for cell in cells
if not any(tag in blacklist for tag in cell['tags'])
]
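# Example (hypothetical cells, each with a 'tags' list):
#   cells = [{'tags': ['skip']}, {'tags': ['keep']}, {'tags': []}]
#   filter_blacklist(cells, blacklist='skip', default_blacklist={'keep'}, include=('keep',))
#   keeps the last two cells: 'keep' is removed from the merged blacklist via `include`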
def refresh_prior(func):
@wraps(func)
def inner(self, *a, **kw):
if self.autorefresh:
self.refresh(on_changed=True)
return func(self, *a, **kw)
return inner
# class ObjectView:
# '''Wraps around another object and overrides properties that have been set on the view.'''
# def __init__(self, source, **kw):
# self.source
# for k, v in kw.items():
# setattr(self, k, v)
#
# def __getattr__(self, name):
# return getattr(self.source, name)
#
the-stack_106_30463 | import importlib.util
import itertools
import os
import re
from collections import defaultdict
import pytest
try:
from mypy import api
except ImportError:
NO_MYPY = True
else:
NO_MYPY = False
TESTS_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"typing",
)
PASS_DIR = os.path.join(TESTS_DIR, "pass")
FAIL_DIR = os.path.join(TESTS_DIR, "fail")
REVEAL_DIR = os.path.join(TESTS_DIR, "reveal")
MYPY_INI = os.path.join(TESTS_DIR, "mypy.ini")
CACHE_DIR = os.path.join(TESTS_DIR, ".mypy_cache")
def get_test_cases(directory):
for root, _, files in os.walk(directory):
for fname in files:
if os.path.splitext(fname)[-1] == ".py":
fullpath = os.path.join(root, fname)
# Use relative path for nice py.test name
relpath = os.path.relpath(fullpath, start=directory)
yield pytest.param(
fullpath,
# Manually specify a name for the test
id=relpath,
)
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
stdout, stderr, exitcode = api.run([
"--config-file",
MYPY_INI,
"--cache-dir",
CACHE_DIR,
path,
])
assert exitcode == 0, stdout
assert re.match(r"Success: no issues found in \d+ source files?", stdout.strip())
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
stdout, stderr, exitcode = api.run([
"--config-file",
MYPY_INI,
"--cache-dir",
CACHE_DIR,
path,
])
assert exitcode != 0
with open(path) as fin:
lines = fin.readlines()
errors = defaultdict(lambda: "")
error_lines = stdout.rstrip("\n").split("\n")
assert re.match(
r"Found \d+ errors? in \d+ files? \(checked \d+ source files?\)",
error_lines[-1].strip(),
)
for error_line in error_lines[:-1]:
error_line = error_line.strip()
if not error_line:
continue
match = re.match(
r"^.+\.py:(?P<lineno>\d+): (error|note): .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected error line format: {error_line}")
lineno = int(match.group('lineno'))
errors[lineno] += error_line
for i, line in enumerate(lines):
lineno = i + 1
if " E:" not in line and lineno not in errors:
continue
target_line = lines[lineno - 1]
if "# E:" in target_line:
marker = target_line.split("# E:")[-1].strip()
            assert lineno in errors, f'Expected error "{marker}" was not reported by mypy'
assert marker in errors[lineno]
else:
            pytest.fail(f"Unexpected mypy error on line {lineno}: {repr(errors[lineno])}")
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
stdout, stderr, exitcode = api.run([
"--config-file",
MYPY_INI,
"--cache-dir",
CACHE_DIR,
path,
])
with open(path) as fin:
lines = fin.readlines()
for error_line in stdout.split("\n"):
error_line = error_line.strip()
if not error_line:
continue
match = re.match(
r"^.+\.py:(?P<lineno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group('lineno'))
assert "Revealed type is" in error_line
marker = lines[lineno - 1].split("# E:")[-1].strip()
assert marker in error_line
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path):
path_without_extension, _ = os.path.splitext(path)
dirname, filename = path.split(os.sep)[-2:]
spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path)
test_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(test_module)
|
the-stack_106_30466 | import json
from os import path
from time import sleep
from chromedriver_py import binary_path # this will get you the path variable
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from utils import selenium_utils
from utils.logger import log
from utils.selenium_utils import options, chrome_options
LOGIN_URL = "https://secure.evga.com/us/login.asp"
CONFIG_PATH = "evga_config.json"
class Evga:
def __init__(self, debug=False):
self.driver = webdriver.Chrome(
executable_path=binary_path, options=options, chrome_options=chrome_options
)
self.credit_card = {}
try:
if path.exists(CONFIG_PATH):
with open(CONFIG_PATH) as json_file:
config = json.load(json_file)
username = config["username"]
password = config["password"]
self.credit_card["name"] = config["credit_card"]["name"]
self.credit_card["number"] = config["credit_card"]["number"]
self.credit_card["cvv"] = config["credit_card"]["cvv"]
self.credit_card["expiration_month"] = config["credit_card"][
"expiration_month"
]
self.credit_card["expiration_year"] = config["credit_card"][
"expiration_year"
]
except Exception as e:
log.error(f"This is most likely an error with your {CONFIG_PATH} file.")
raise e
self.login(username, password)
def login(self, username, password):
"""
We're just going to enter the user info and let the user handle the captcha
:param username:
:param password:
:return:
"""
self.driver.execute_cdp_cmd(
"Network.setUserAgentOverride",
{
"userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.53 Safari/537.36"
},
)
self.driver.execute_script(
"Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"
)
self.driver.get(LOGIN_URL)
selenium_utils.wait_for_page(self.driver, "EVGA - Intelligent Innovation")
selenium_utils.field_send_keys(self.driver, "evga_login", username)
selenium_utils.field_send_keys(self.driver, "password", password)
log.info("Go do the captcha and log in")
selenium_utils.wait_for_page(
self.driver, "EVGA - Intelligent Innovation - Official Website", 300
)
log.info("Logged in!")
def buy(self, delay=5, test=False):
if test:
self.driver.get("https://www.evga.com/products/product.aspx?pn=10G-P5-3897-KR&associatecode=2QME1VF65K9ZY8B")
selenium_utils.wait_for_page(self.driver, "EVGA - Products - EVGA GeForce RTX 3080 FTW3 ULTRA GAMING, 10G-P5-3897-KR, 10GB GDDR6X, iCX3 Technology, ARGB LED, Metal Backplate - 10G-P5-3897-KR")
else:
self.driver.get(
"https://www.evga.com/products/product.aspx?pn=10G-P5-3897-KR&associatecode=2QME1VF65K9ZY8B"
)
selenium_utils.wait_for_page(
self.driver,
"EVGA - Products - EVGA GeForce RTX 3080 FTW3 ULTRA GAMING, 10G-P5-3897-KR, 10GB GDDR6X, iCX3 Technology, ARGB LED, Metal Backplate - 10G-P5-3897-KR",
)
#Check for stock
log.info("On GPU Page")
atc_buttons = self.driver.find_elements_by_xpath(
'//input[@class="btnBigAddCart"]'
)
while not atc_buttons:
log.info("Refreshing GPU and checking for add to cart button")
# self.driver.refresh()
self.driver.get("https://www.evga.com/products/product.aspx?pn=10G-P5-3897-KR&associatecode=2QME1VF65K9ZY8B")
atc_buttons = self.driver.find_elements_by_xpath(
'//input[@class="btnBigAddCart"]'
)
sleep(3)
# # Add to cart
# atc_buttons[0].click()
selenium_utils.button_click_using_xpath(
self.driver, '//*[@id="LFrame_btnAddToCart"]'
)
# Go to checkout
selenium_utils.wait_for_page(self.driver, "EVGA - Checkout")
selenium_utils.button_click_using_xpath(
self.driver, '//*[@id="LFrame_CheckoutButton"]'
)
# Shipping Address screen
selenium_utils.wait_for_page(self.driver, "Shopping")
log.info("Skip that page.")
self.driver.get("https://secure.evga.com/Cart/Checkout_Payment.aspx")
selenium_utils.wait_for_page(self.driver, "EVGA - Checkout - Billing Options")
log.info("Ensure that we are paying with credit card")
WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.XPATH, './/input[@value="rdoCreditCard"]'))
).click()
WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable(
(By.XPATH, '//*[@id="ctl00_LFrame_btncontinue"]')
)
).click()
selenium_utils.wait_for_element(self.driver, "ctl00_LFrame_txtNameOnCard")
log.info("Populate credit card fields")
selenium_utils.field_send_keys(
self.driver, "ctl00$LFrame$txtNameOnCard", self.credit_card["name"]
)
selenium_utils.field_send_keys(
self.driver, "ctl00$LFrame$txtCardNumber", self.credit_card["number"]
)
selenium_utils.field_send_keys(
self.driver, "ctl00$LFrame$txtCvv", self.credit_card["cvv"]
)
Select(self.driver.find_element_by_id("ctl00_LFrame_ddlMonth")).select_by_value(
self.credit_card["expiration_month"]
)
Select(self.driver.find_element_by_id("ctl00_LFrame_ddlYear")).select_by_value(
self.credit_card["expiration_year"]
)
WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable(
(
By.XPATH,
"/html/body/form/div[3]/div[3]/div/div[1]/div[5]/div[3]/div/div[1]/div/div[@id='checkoutButtons']/input[2]",
)
)
).click()
log.info("Finalize Order Page")
selenium_utils.wait_for_page(self.driver, "EVGA - Checkout - Finalize Order")
WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "ctl00_LFrame_cbAgree"))
).click()
selenium_utils.wait_for_element(self.driver, "ctl00_LFrame_btncontinue")
if not test:
WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "ctl00_LFrame_btncontinue"))
).click()
selenium_utils.wait_for_page(self.driver, "EVGA - Checkout - Order Successful")
log.info("Finalized Order!")
|
the-stack_106_30469 | from pycolocstats.core.constants import CWL_OUTPUT_FOLDER_NAME
def toHtml(outputFolder=None, stdoutFile=None, stderrFile=None):
from os.path import sep
from os import linesep
rootFolderName = outputFolder.split(sep)[-1] if outputFolder else ""
targetHtml = sep.join((CWL_OUTPUT_FOLDER_NAME, rootFolderName, "lolaResults", "index.html"))
html = "<!DOCTYPE html>" + linesep
html += "<html>" + linesep
html += "<head>" + linesep
html += "<meta http-equiv=\"refresh\" content=\"0; url={}\" />".format(targetHtml)
html += "</head>" + linesep
html += "<body>" + linesep
html += "<p><a href=\"{}\">Redirect to full results page</a></p>".format(targetHtml)
html += "</body>" + linesep
html += "</html>"
return html |
the-stack_106_30476 | import nltk
from nltk.tokenize.regexp import RegexpTokenizer
from nltk.tokenize import word_tokenize
import spacy
class NltkTokenizer:
def __init__(self):
# pattern taken from https://stackoverflow.com/questions/35118596/python-regular-expression-not-working-properly
self.pattern = r"""(?x) # set flag to allow verbose regexps
(?:[A-Z]\.)+ # abbreviations, e.g. U.S.A.
|\d+(?:\.\d+)?%? # numbers, incl. currency and percentages
|\w+(?:[-']\w+)* # words w/ optional internal hyphens/apostrophe
|(?:[+/\-@&*]) # special characters with meanings
"""
def __simply_tokenize__(self, text):
# tokenizer = RegexpTokenizer(self.pattern)
# return tokenizer.tokenize(text)
return word_tokenize(text)
def tokenize(self, text):
"""
Returns a list of tuples containing every token and its associated position in the text
"""
tokens = self.__simply_tokenize__(text)
offset = 0
tok_pos = []
for token in tokens:
offset = text.find(token, offset)
tok_pos.append((token, offset))
offset += len(token)
return tok_pos
class SpacyTokenizer:
def __init__(self):
self.tokenizer = spacy.load("en_core_web_sm")
def tokenize(self, text):
return [(str(words),words.idx) for words in self.tokenizer(text)]
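# Optional self-check added for illustration; it assumes the NLTK 'punkt' tokenizer data
# and the spaCy 'en_core_web_sm' model are installed locally. Both tokenizers return
# (token, character offset) pairs.
if __name__ == '__main__':
    sample = "Hello world"
    print(NltkTokenizer().tokenize(sample))   # -> [('Hello', 0), ('world', 6)]
    print(SpacyTokenizer().tokenize(sample))  # -> [('Hello', 0), ('world', 6)]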
|
the-stack_106_30477 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class OpsOnDiffFramesGroupByRollingTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
def _test_groupby_rolling_func(self, f):
pser = pd.Series([1, 2, 3], name="a")
pkey = pd.Series([1, 2, 3], name="a")
psser = ps.from_pandas(pser)
kkey = ps.from_pandas(pkey)
self.assert_eq(
getattr(psser.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pser.groupby(pkey).rolling(2), f)().sort_index(),
)
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pkey = pd.Series([1, 2, 3, 2], name="a")
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
# The behavior of GroupBy.rolling is changed from pandas 1.3.
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
self.assert_eq(
getattr(psdf.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey).rolling(2), f)().sort_index(),
)
else:
self.assert_eq(
getattr(psdf.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey).rolling(2), f)().drop("a", axis=1).sort_index(),
)
self.assert_eq(
getattr(psdf.groupby(kkey)["b"].rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey)["b"].rolling(2), f)().sort_index(),
)
self.assert_eq(
getattr(psdf.groupby(kkey)[["b"]].rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey)[["b"]].rolling(2), f)().sort_index(),
)
def test_groupby_rolling_count(self):
self._test_groupby_rolling_func("count")
def test_groupby_rolling_min(self):
self._test_groupby_rolling_func("min")
def test_groupby_rolling_max(self):
self._test_groupby_rolling_func("max")
def test_groupby_rolling_mean(self):
self._test_groupby_rolling_func("mean")
def test_groupby_rolling_sum(self):
self._test_groupby_rolling_func("sum")
def test_groupby_rolling_std(self):
        # TODO: `std` now raises an error in pandas 1.0.0
self._test_groupby_rolling_func("std")
def test_groupby_rolling_var(self):
self._test_groupby_rolling_func("var")
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_ops_on_diff_frames_groupby_rolling import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
the-stack_106_30480 | import sys
import math
import cv2
import numpy as np
from PIL import Image
from util.richlog import get_logger
from . import imgops
from . import item
from . import minireco
from . import resources
from . import util
logger = get_logger(__name__)
class RecognizeSession:
def __init__(self):
self.recognized_groups = []
self.low_confidence = False
self.vh = 0
self.vw = 0
def tell_stars(starsimg):
    """Threshold the rating strip and report which of the three stars are lit, left to right."""
    thstars = (np.asarray(starsimg.convert('L')) > 96)
width, height = thstars.shape[::-1]
starwidth = width // 3
threshold = height * (width / 12)
stars = []
star1 = thstars[:, 0:starwidth]
stars.append(np.count_nonzero(star1) > threshold)
star2 = thstars[:, starwidth:starwidth * 2]
stars.append(np.count_nonzero(star2) > threshold)
star3 = thstars[:, starwidth * 2:]
stars.append(np.count_nonzero(star3) > threshold)
return tuple(stars)
recozh = minireco.MiniRecognizer(resources.load_pickle('minireco/NotoSansCJKsc-Medium.dat'))
reco_novecento_bold = minireco.MiniRecognizer(resources.load_pickle('minireco/Novecentosanswide_Bold.dat'))
def tell_group(groupimg, session, bartop, barbottom, ):
logger.logimage(groupimg)
grouptext = groupimg.crop((0, barbottom, groupimg.width, groupimg.height))
thim = imgops.enhance_contrast(grouptext.convert('L'), 60)
# thim = imgops.crop_blackedge(thim)
logger.logimage(thim)
groupname, diff = tell_group_name_alt(thim, session)
if diff > 0.8:
session.low_confidence = True
if groupname == '幸运掉落':
return (groupname, [('(家具)', 1)])
vw, vh = session.vw, session.vh
itemwidth = 20.370 * vh
itemcount = roundint(groupimg.width / itemwidth)
logger.logtext('group has %d items' % itemcount)
result = []
for i in range(itemcount):
itemimg = groupimg.crop((itemwidth * i, 0.000 * vh, itemwidth * (i+1), 18.981 * vh))
# x1, _, x2, _ = (0.093*vh, 0.000*vh, 19.074*vh, 18.981*vh)
itemimg = itemimg.crop((0.093 * vh, 0, 19.074 * vh, itemimg.height))
recognized_item = item.tell_item(itemimg, session)
if recognized_item.low_confidence:
session.low_confidence = True
result.append((recognized_item.name, recognized_item.quantity))
return (groupname, result)
def tell_group_name_alt(img, session):
names = [('龙门币', '声望&龙门币奖励'),
('常规', '常规掉落'),
('特殊', '特殊掉落'),
('幸运', '幸运掉落'),
('额外', '额外物资'),
('首次', '首次掉落'),
('返还', '理智返还')]
comparsions = []
scale = session.vh * 100 / 1080
for name, group in names:
if group in session.recognized_groups:
continue
template = resources.load_image_cached(f'end_operation/group/{name}.png', 'L')
scaled_height = template.height * scale
scaled_height_floor = math.floor(scaled_height)
scaled_template_floor = imgops.scale_to_height(template, scaled_height_floor)
if scaled_template_floor.width > img.width or scaled_template_floor.height > img.height:
raise ValueError('image smaller than template')
_, diff_floor = imgops.match_template(img, scaled_template_floor, cv2.TM_SQDIFF_NORMED)
if scaled_height_floor + 1 <= img.height:
scaled_template_ceil = imgops.scale_to_height(template, scaled_height_floor + 1)
_, diff_ceil = imgops.match_template(img, scaled_template_ceil, cv2.TM_SQDIFF_NORMED)
diff = min(diff_floor, diff_ceil)
else:
diff = diff_floor
comparsions.append((group, diff))
if comparsions:
comparsions.sort(key=lambda x: x[1])
logger.logtext(repr(comparsions))
return comparsions[0]
def find_jumping(ary, threshold):
    """Return positions just after each jump of at least `threshold` between consecutive values;
    touching jumps in the same direction are merged into one magnitude-weighted position."""
    ary = np.array(ary, dtype=np.int16)
    diffs = np.diff(ary)
shit = [x for x in enumerate(diffs) if abs(x[1]) >= threshold]
if not shit:
return []
groups = [[shit[0]]]
for x in shit[1:]:
lastgroup = groups[-1]
if np.sign(x[1]) == np.sign(lastgroup[-1][1]):
lastgroup.append(x)
else:
groups.append([x])
logger.logtext(repr(groups))
pts = []
for group in groups:
pts.append(int(np.average(
tuple(x[0] for x in group), weights=tuple(abs(x[1]) for x in group))) + 1)
return pts
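# Worked example (values made up to illustrate the behaviour): a flat-high-flat profile
# yields one position per edge, offset one past the jump itself.
#
#     find_jumping([0, 0, 100, 100, 0, 0], 55)   # -> [2, 4]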
def roundint(x):
return int(round(x))
# scale = 0
def check_level_up_popup(img):
vw, vh = util.get_vwvh(img.size)
    lvl_up_img = img.crop((50*vw-48.796*vh, 47.685*vh, 50*vw-23.148*vh, 56.019*vh)).convert('L') # 等级提升 ("level up") banner
lvl_up_img = imgops.enhance_contrast(lvl_up_img, 216, 255)
lvl_up_text = recozh.recognize(lvl_up_img)
return minireco.check_charseq(lvl_up_text, '提升')
def check_end_operation(style, friendship, img):
if style == 'main':
return check_end_operation_main_friendship(img) if friendship else check_end_operation_main(img)
elif style == 'interlocking':
if friendship:
return check_end_operation_interlocking_friendship(img)
else:
raise NotImplementedError()
def check_end_operation_main_friendship(img):
vw, vh = util.get_vwvh(img.size)
template = resources.load_image_cached('end_operation/friendship.png', 'RGB')
operation_end_img = img.crop((117.083*vh, 64.306*vh, 121.528*vh, 69.583*vh)).convert('RGB')
mse = imgops.compare_mse(*imgops.uniform_size(template, operation_end_img))
return mse < 3251
def check_end_operation_main(img):
vw, vh = util.get_vwvh(img.size)
template = resources.load_image_cached('end_operation/end.png', 'L')
operation_end_img = img.crop((4.722 * vh, 80.278 * vh, 56.389 * vh, 93.889 * vh)).convert('L')
operation_end_img = imgops.enhance_contrast(operation_end_img, 225, 255)
mse = imgops.compare_mse(*imgops.uniform_size(template, operation_end_img))
return mse < 6502
def check_end_operation_interlocking_friendship(img):
vw, vh = util.get_vwvh(img.size)
return imgops.compare_region_mse(img, (100*vw-34.907*vh, 55.185*vh, 100*vw-30.556*vh, 60.370*vh), 'end_operation/interlocking/friendship.png', logger=logger)
def check_end_operation2(img, threshold=0.8):
cv_screen = np.asarray(img.convert('L'))
h, w = cv_screen.shape[:2]
scale = h / 1080
if scale != 1:
cv_screen = cv2.resize(cv_screen, (int(w/scale), 1080))
template = np.asarray(resources.load_image_cached('end_operation/end2.png', 'L'))
res = cv2.matchTemplate(cv_screen, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
return max_val > threshold
def get_end2_rect(img):
vw, vh = util.get_vwvh(img.size)
return 38.594 * vw, 88.056 * vh, 61.484 * vw, 95.694 * vh
def get_dismiss_level_up_popup_rect(viewport):
vw, vh = util.get_vwvh(viewport)
return (100 * vw - 67.315 * vh, 16.019 * vh, 100 * vw - 5.185 * vh, 71.343 * vh)
get_dismiss_end_operation_rect = get_dismiss_level_up_popup_rect
def recognize(style, im):
if style == 'main':
return recognize_main(im)
elif style == 'interlocking':
return recognize_interlocking(im)
else:
raise ValueError(style)
def recognize_main(im):
import time
t0 = time.monotonic()
vw, vh = util.get_vwvh(im.size)
lower = im.crop((0, 61.111 * vh, 100 * vw, 100 * vh))
logger.logimage(lower)
operation_id = lower.crop((0, 4.444 * vh, 23.611 * vh, 11.388 * vh)).convert('L')
# logger.logimage(operation_id)
operation_id = imgops.enhance_contrast(imgops.crop_blackedge(operation_id), 80, 220)
logger.logimage(operation_id)
operation_id_str = reco_novecento_bold.recognize(operation_id).upper()
fixup, operation_id_str = minireco.fix_stage_name(operation_id_str)
if fixup:
logger.logtext('fixed to ' + operation_id_str)
# operation_name = lower.crop((0, 14.074*vh, 23.611*vh, 20*vh)).convert('L')
# operation_name = imgops.enhance_contrast(imgops.crop_blackedge(operation_name))
# logger.logimage(operation_name)
stars = lower.crop((23.611 * vh, 6.759 * vh, 53.241 * vh, 16.944 * vh))
logger.logimage(stars)
stars_status = tell_stars(stars)
# level = lower.crop((63.148 * vh, 4.444 * vh, 73.333 * vh, 8.611 * vh))
# logger.logimage(level)
# exp = lower.crop((76.852 * vh, 5.556 * vh, 94.074 * vh, 7.963 * vh))
# logger.logimage(exp)
recoresult = {
'operation': operation_id_str,
'stars': stars_status,
'items': [],
'low_confidence': False
}
items = lower.crop((68.241 * vh, 10.926 * vh, lower.width, 35.000 * vh))
logger.logimage(items)
x, y = 6.667 * vh, 18.519 * vh
linedet = items.crop((x, y, x + 1, items.height)).convert('L')
d = np.asarray(linedet)
linedet = find_jumping(d.reshape(linedet.height), 55)
if len(linedet) >= 2:
linetop, linebottom, *_ = linedet
else:
logger.logtext('horizontal line detection failed')
recoresult['low_confidence'] = True
return recoresult
linetop += y
linebottom += y
grouping = items.crop((0, linetop, items.width, linebottom))
grouping = grouping.resize((grouping.width, 1), Image.BILINEAR)
grouping = grouping.convert('L')
logger.logimage(grouping.resize((grouping.width, 16)))
d = np.array(grouping, dtype=np.int16)[0]
points = [0, *find_jumping(d, 55)]
if len(points) % 2 != 0:
raise RuntimeError('possibly incomplete item list')
finalgroups = list(zip(*[iter(points)] * 2)) # each_slice(2)
logger.logtext(repr(finalgroups))
imggroups = [items.crop((x1, 0, x2, items.height))
for x1, x2 in finalgroups]
items = []
session = RecognizeSession()
session.vw = vw
session.vh = vh
for group in imggroups:
groupresult = tell_group(group, session, linetop, linebottom)
session.recognized_groups.append(groupresult[0])
items.append(groupresult)
t1 = time.monotonic()
if session.low_confidence:
logger.logtext('LOW CONFIDENCE')
logger.logtext('time elapsed: %f' % (t1 - t0))
recoresult['items'] = items
recoresult['low_confidence'] = recoresult['low_confidence'] or session.low_confidence
return recoresult
def recognize_interlocking(im):
import time
t0 = time.monotonic()
from . import stage_ocr
vw, vh = util.get_vwvh(im.size)
operation_id = im.crop((100*vw-26.204*vh, 21.852*vh, 100*vw-9.907*vh, 26.204*vh)).convert('L')
thr = int(0.833*vh)
left, _, _, _ = imgops.cropbox_blackedge2(operation_id, x_threshold=0.833*vh)
operation_id = operation_id.crop((left-thr, 0, operation_id.width, operation_id.height))
logger.logimage(operation_id)
operation_id_str = stage_ocr.do_img_ocr(operation_id.convert('RGB')).upper()
logger.logtext(operation_id_str)
fixup, operation_id_str = minireco.fix_stage_name(operation_id_str)
if fixup:
logger.logtext('fixed to ' + operation_id_str)
stars = im.crop((100*vw-41.667*vh, 10.000*vh, 100*vw-11.204*vh, 20.185*vh))
logger.logimage(stars)
stars_status = tell_stars(stars)
recoresult = {
'operation': operation_id_str,
'stars': stars_status,
'items': [],
'low_confidence': False
}
items = im.crop((100*vw-87.778*vh, 65.000*vh, 100*vw, 89.259*vh))
logger.logimage(items)
sumx = np.asarray(items.convert('RGB')).sum(axis=2).sum(axis=1)
diffx = np.diff(sumx.astype(np.int32))
linetop = np.argmax(diffx)+1
linebottom = np.argmin(diffx)+1
logger.logtext('linetop=%r, linebottom=%r' % (linetop, linebottom))
grouping = items.crop((0, linetop, items.width, linebottom))
grouping = grouping.resize((grouping.width, 1), Image.BILINEAR)
grouping = grouping.convert('L')
logger.logimage(grouping.resize((grouping.width, 16)))
d = np.array(grouping, dtype=np.int16)[0]
points = [0, *find_jumping(d, 55)]
if len(points) % 2 != 0:
raise RuntimeError('possibly incomplete item list')
finalgroups = list(zip(*[iter(points)] * 2)) # each_slice(2)
logger.logtext(repr(finalgroups))
imggroups = [items.crop((x1, 0, x2, items.height))
for x1, x2 in finalgroups]
items = []
session = RecognizeSession()
session.vw = vw
session.vh = vh
for group in imggroups:
groupresult = tell_group(group, session, linetop, linebottom)
session.recognized_groups.append(groupresult[0])
items.append(groupresult)
t1 = time.monotonic()
if session.low_confidence:
logger.logtext('LOW CONFIDENCE')
logger.logtext('time elapsed: %f' % (t1 - t0))
recoresult['items'] = items
recoresult['low_confidence'] = recoresult['low_confidence'] or session.low_confidence
return recoresult
def get_still_check_rect(viewport):
vw, vh = util.get_vwvh(viewport)
return (68.241 * vh, 61.111 * vh, 100 * vw, 100 * vh)
if __name__ == '__main__':
print(globals()[sys.argv[-2]](Image.open(sys.argv[-1])))
|
the-stack_106_30481 | from __future__ import print_function
from optparse import OptionParser
import configuration as config
import graph.contract_graph as contract
import graph.algorithms as algorithms
import graph.graphfactory as graphfactory
import osm.read_osm
import osm.sanitize_input
import output.write_graph as output
import utils.timer as timer
@timer.timer
def convert_osm_to_roadgraph(filename, network_type, options):
configuration = config.Configuration(network_type)
r_index = filename.rfind(".")
out_file = filename[:r_index]
print("selected network type: {}".format(configuration.network_type))
print("accepted highway tags: {}".format(configuration.accepted_highways))
print("opening file: {}".format(filename))
nodes, ways = osm.read_osm.read_file(filename, configuration)
osm.sanitize_input.sanitize_input(ways, nodes)
graph = graphfactory.build_graph_from_osm(nodes, ways)
if not options.lcc:
graph = algorithms.computeLCCGraph(graph)
output.write_to_file(graph, out_file, configuration.get_file_extension())
if options.contract:
contracted_graph = contract.contract(graph)
output.write_to_file(contracted_graph, out_file, "{}c".format(configuration.get_file_extension()))
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", action="store", type="string")
parser.add_option("-n", "--networkType", dest="network_type", action="store", default="pedestrian", help="(p)edestrian, (b)icycle, (c)ar, [default: pedestrian]")
parser.add_option("-l", "--nolcc", dest="lcc", action="store_true", default=False)
parser.add_option("-c", "--contract", dest="contract", action="store_true")
(options, args) = parser.parse_args()
filename = options.filename
if filename is None:
parser.print_help()
exit()
long_network_type = {"p": "pedestrian", "c": "car", "b": "bicycle"}
if options.network_type in long_network_type.keys():
network_type = long_network_type[options.network_type]
    elif options.network_type in long_network_type.values():
network_type = options.network_type
else:
print("network type improperly set")
exit()
convert_osm_to_roadgraph(filename, network_type, options)
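# Example invocation (the script name is whatever this file is saved as, shown here only
# for illustration): convert an OSM extract to a car network and also write the
# contracted variant.
#
#     python run.py -f region.osm -n c -c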
|
the-stack_106_30482 | """
Q202
Happy Number
Easy
Hash table; Math.
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process:
Starting with any positive integer, replace the number by the
sum of the squares of its digits, and repeat the process until
the number equals 1 (where it will stay), or it loops endlessly
in a cycle which does not include 1. Those numbers for which
this process ends in 1 are happy numbers.
Example:
Input: 19
Output: true
Explanation:
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
"""
class Solution:
def isHappy(self, n: int) -> bool:
hist = {}
while 1:
if n == 1:
return True
elif n not in hist:
hist[n] = 0
n0 = 0
for item in str(n):
n0 += int(item)**2
n = n0
else:
return False
sol = Solution()
a = 12
print(sol.isHappy(a))
|
the-stack_106_30483 | # Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
import os
# Emulation hook types
HOOK_CODE = 1000
HOOK_MEM_INVALID = 1001
HOOK_MEM_PERM_EXEC = 1002
HOOK_MEM_READ = 1003
HOOK_MEM_WRITE = 1004
HOOK_INTERRUPT = 1005
HOOK_MEM_ACCESS = 1006
HOOK_MEM_PERM_WRITE = 1007
HOOK_API = 1008
HOOK_DYN_CODE = 1009
HOOK_INSN = 1010
HOOK_MEM_MAP = 1011
HOOK_INSN_INVALID = 1012
# Emulation memory protection types
PERM_MEM_NONE = 0
PERM_MEM_EXEC = 0x10
PERM_MEM_READ = 0x02
PERM_MEM_WRITE = 0x04
PERM_MEM_RW = PERM_MEM_READ | PERM_MEM_WRITE
PERM_MEM_RWX = PERM_MEM_READ | PERM_MEM_WRITE | PERM_MEM_EXEC
# Emulation memory access types
INVALID_MEM_EXEC = 2000
INVALID_MEM_READ = 2001
INVALID_MEM_WRITE = 2002
INVAL_PERM_MEM_WRITE = 2003
INVAL_PERM_MEM_EXEC = 2004
INVAL_PERM_MEM_READ = 2005
def normalize_package_path(path):
"""
Get the supplied path in relation to the package root
"""
def _get_speakeasy_root():
return os.path.join(os.path.dirname(__file__))
root_var = "$ROOT$"
if root_var in path:
root = _get_speakeasy_root()
return path.replace(root_var, root)
return path
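# A short illustration (the 'winenv' sub-path is only an example): paths that start with
# the "$ROOT$" placeholder are rewritten relative to this package's directory, everything
# else passes through untouched.
#
#     normalize_package_path('$ROOT$/winenv')   # -> '<package dir>/winenv'
#     normalize_package_path('C:\\data\\cfg')   # -> 'C:\\data\\cfg'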
class Hook(object):
"""
Base class for all emulator hooks
"""
def __init__(self, se_obj, emu_eng, cb, ctx=[], native_hook=False):
"""
Arguments:
se_obj: speakeasy emulator object
emu_eng: emulation engine object
cb: Python callback function
ctx: Arbitrary context that be passed between hook callbacks
native_hook: When set to True, a new, raw callback will be registered with
with the underlying emulation engine that is called directly by the DLL.
Otherwise, this hook will be dispatched via a wrapper hook
(e.g. see _wrap_code_cb below)
"""
self.cb = cb
self.handle = 0
self.enabled = False
self.added = False
self.native_hook = native_hook
self.emu_eng = emu_eng
self.se_obj = se_obj
self.ctx = ctx
def enable(self):
self.enabled = True
self.emu_eng.hook_enable(self.handle)
def disable(self):
self.enabled = False
self.emu_eng.hook_disable(self.handle)
def _wrap_code_cb(self, emu, addr, size, ctx=[]):
try:
if self.enabled:
if self.se_obj.exit_event and self.se_obj.exit_event.is_set():
self.se_obj.stop()
return False
return self.cb(self.se_obj, addr, size, self.ctx)
return True
except KeyboardInterrupt:
self.se_obj.stop()
return False
def _wrap_intr_cb(self, emu, num, ctx=[]):
if self.enabled:
return self.cb(self.se_obj, num, self.ctx)
return True
def _wrap_in_insn_cb(self, emu, port, size, ctx=[]):
if self.enabled:
return self.cb(self.se_obj, port, size)
return True
def _wrap_syscall_insn_cb(self, emu, ctx=[]):
if self.enabled:
return self.cb(self.se_obj)
return True
def _wrap_memory_access_cb(self, emu, access, addr, size, value, ctx):
try:
if self.enabled:
if self.se_obj.exit_event and self.se_obj.exit_event.is_set():
self.se_obj.stop()
return False
return self.cb(self.se_obj, access, addr, size, value, ctx)
return True
except KeyboardInterrupt:
self.se_obj.stop()
return False
def _wrap_invalid_insn_cb(self, emu, ctx=[]):
if self.enabled:
return self.cb(self.se_obj, self.ctx)
return True
class ApiHook(Hook):
"""
This hook type is used when using a specific API (e.g. kernel32.CreateFile)
"""
def __init__(
self, se_obj, emu_eng, cb, module="", api_name="", argc=0, call_conv=None
):
super(ApiHook, self).__init__(se_obj, emu_eng, cb)
self.module = module
self.api_name = api_name
self.argc = argc
self.call_conv = call_conv
class DynCodeHook(Hook):
"""
This hook type is used to get a callback when dynamically created/copied code is executed
Currently, this will only fire once per dynamic code mapping. Could be useful for unpacking.
"""
def __init__(self, se_obj, emu_eng, cb, ctx=[]):
super(DynCodeHook, self).__init__(se_obj, emu_eng, cb)
class CodeHook(Hook):
"""
This hook callback will fire for every CPU instruction
"""
def __init__(self, se_obj, emu_eng, cb, begin=1, end=0, ctx=[], native_hook=True):
super(CodeHook, self).__init__(
se_obj, emu_eng, cb, ctx=ctx, native_hook=native_hook
)
self.begin = begin
self.end = end
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_CODE, cb=self._wrap_code_cb, begin=self.begin, end=self.end
)
self.added = True
self.enabled = True
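# Sketch of the callback contract a CodeHook expects (see _wrap_code_cb above): the
# callback receives the speakeasy emulator object, the instruction address, its size and
# the hook's ctx list, and whatever it returns is handed back to the underlying emulation
# engine by the wrapper. How the hook gets registered is up to the embedding emulator
# object, so the function below is only an illustrative shape, not a speakeasy API.
#
#     def my_code_cb(se_obj, addr, size, ctx):
#         # e.g. trace every executed instruction address
#         print(hex(addr), size)
#         return True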
class ReadMemHook(Hook):
"""
This hook will fire each time a valid chunk of memory is read from
"""
def __init__(self, se_obj, emu_eng, cb, begin=1, end=0, native_hook=True):
super(ReadMemHook, self).__init__(se_obj, emu_eng, cb, native_hook=native_hook)
self.begin = begin
self.end = end
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_MEM_READ,
cb=self._wrap_memory_access_cb,
begin=self.begin,
end=self.end,
)
self.added = True
self.enabled = True
class WriteMemHook(Hook):
"""
This hook will fire each time a valid chunk of memory is written to
"""
def __init__(self, se_obj, emu_eng, cb, begin=1, end=0, native_hook=True):
super(WriteMemHook, self).__init__(se_obj, emu_eng, cb, native_hook=native_hook)
self.begin = begin
self.end = end
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_MEM_WRITE,
cb=self._wrap_memory_access_cb,
begin=self.begin,
end=self.end,
)
self.added = True
self.enabled = True
class MapMemHook(Hook):
"""
This hook will fire each time a chunk of memory is mapped
"""
def __init__(self, se_obj, emu_eng, cb, begin=1, end=0):
super(MapMemHook, self).__init__(se_obj, emu_eng, cb)
self.begin = begin
self.end = end
def add(self):
self.added = True
self.enabled = True
class InvalidMemHook(Hook):
"""
This hook will fire each time a invalid chunk of memory is accessed
"""
def __init__(self, se_obj, emu_eng, cb, native_hook=False):
super(InvalidMemHook, self).__init__(
se_obj, emu_eng, cb, native_hook=native_hook
)
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_MEM_INVALID, cb=self._wrap_memory_access_cb
)
self.added = True
self.enabled = True
class InterruptHook(Hook):
"""
This hook will fire each time a a software interrupt is triggered
"""
def __init__(self, se_obj, emu_eng, cb, ctx=[], native_hook=True):
super(InterruptHook, self).__init__(
se_obj, emu_eng, cb, ctx=ctx, native_hook=native_hook
)
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_INTERRUPT, cb=self._wrap_intr_cb
)
self.added = True
self.enabled = True
class InstructionHook(Hook):
"""
This hook will fire each time a instruction hook is triggered,
Only the instructions: IN, OUT, SYSCALL, and SYSENTER are supported by unicorn.
"""
def __init__(self, se_obj, emu_eng, cb, ctx=[], native_hook=True, insn=None):
super(InstructionHook, self).__init__(
se_obj, emu_eng, cb, ctx=ctx, native_hook=native_hook
)
self.insn = insn
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_INSN, cb=self._wrap_syscall_insn_cb, arg1=self.insn
)
self.added = True
self.enabled = True
class InvalidInstructionHook(Hook):
"""
This hook will fire every time an invalid instruction is attempted
to be executed
"""
def __init__(self, se_obj, emu_eng, cb, ctx=[], native_hook=True):
super(InvalidInstructionHook, self).__init__(
se_obj, emu_eng, cb, ctx=ctx, native_hook=native_hook
)
def add(self):
if not self.added and self.native_hook:
self.handle = self.emu_eng.hook_add(
htype=HOOK_INSN_INVALID, cb=self._wrap_invalid_insn_cb
)
self.added = True
self.enabled = True
|
the-stack_106_30484 | import os
import arm.utils
import arm.assets as assets
def parse_context(c, sres, asset, defs, vert=None, frag=None):
con = {}
sres['contexts'].append(con)
con['name'] = c['name']
con['constants'] = []
con['texture_units'] = []
con['vertex_elements'] = []
# Names
con['vertex_shader'] = c['vertex_shader'].rsplit('.', 1)[0].split('/')[-1]
if con['vertex_shader'] not in asset:
asset.append(con['vertex_shader'])
con['fragment_shader'] = c['fragment_shader'].rsplit('.', 1)[0].split('/')[-1]
if con['fragment_shader'] not in asset:
asset.append(con['fragment_shader'])
if 'geometry_shader' in c:
con['geometry_shader'] = c['geometry_shader'].rsplit('.', 1)[0].split('/')[-1]
if con['geometry_shader'] not in asset:
asset.append(con['geometry_shader'])
if 'tesscontrol_shader' in c:
con['tesscontrol_shader'] = c['tesscontrol_shader'].rsplit('.', 1)[0].split('/')[-1]
if con['tesscontrol_shader'] not in asset:
asset.append(con['tesscontrol_shader'])
if 'tesseval_shader' in c:
con['tesseval_shader'] = c['tesseval_shader'].rsplit('.', 1)[0].split('/')[-1]
if con['tesseval_shader'] not in asset:
asset.append(con['tesseval_shader'])
# Params
params = ['depth_write', 'compare_mode', 'stencil_mode', \
'stencil_pass', 'stencil_fail', 'stencil_reference_value', \
'stencil_read_mask', 'stencil_write_mask', 'cull_mode', \
'blend_source', 'blend_destination', 'blend_operation', \
        'alpha_blend_source', 'alpha_blend_destination', 'alpha_blend_operation', \
'color_write_red', 'color_write_green', 'color_write_blue', 'color_write_alpha', \
'color_writes_red', 'color_writes_green', 'color_writes_blue', 'color_writes_alpha', \
'conservative_raster']
for p in params:
if p in c:
con[p] = c[p]
# Parse shaders
if vert == None:
with open(c['vertex_shader']) as f:
vert = f.read().splitlines()
parse_shader(sres, c, con, defs, vert, True) # Parse attribs for vertex shader
if frag == None:
with open(c['fragment_shader']) as f:
frag = f.read().splitlines()
parse_shader(sres, c, con, defs, frag, False)
if 'geometry_shader' in c:
with open(c['geometry_shader']) as f:
geom = f.read().splitlines()
parse_shader(sres, c, con, defs, geom, False)
if 'tesscontrol_shader' in c:
with open(c['tesscontrol_shader']) as f:
tesc = f.read().splitlines()
parse_shader(sres, c, con, defs, tesc, False)
if 'tesseval_shader' in c:
with open(c['tesseval_shader']) as f:
tese = f.read().splitlines()
parse_shader(sres, c, con, defs, tese, False)
def parse_shader(sres, c, con, defs, lines, parse_attributes):
    """Collect vertex inputs, texture units and constants from GLSL lines into con, honoring #if/#ifdef blocks against defs."""
skip_till_endif = 0
skip_else = False
vertex_elements_parsed = False
vertex_elements_parsing = False
stack = []
if parse_attributes == False:
vertex_elements_parsed = True
for line in lines:
line = line.lstrip()
# Preprocessor
if line.startswith('#if'): # if, ifdef, ifndef
s = line.split(' ')[1]
found = s in defs
if line.startswith('#ifndef'):
found = not found
if found == False:
stack.append(0)
else:
stack.append(1)
continue
if line.startswith('#else'):
stack[-1] = 1 - stack[-1]
continue
if line.startswith('#endif'):
stack.pop()
continue
skip = False
for i in stack:
if i == 0:
skip = True
break
if skip:
continue
if vertex_elements_parsed == False and line.startswith('in '):
vertex_elements_parsing = True
vd = {}
s = line.split(' ')
vd['data'] = 'float' + s[1][-1:]
vd['name'] = s[2][:-1]
con['vertex_elements'].append(vd)
if vertex_elements_parsing == True and len(line) > 0 and line.startswith('//') == False and line.startswith('in ') == False:
vertex_elements_parsed = True
if line.startswith('uniform ') or line.startswith('//!uniform'): # Uniforms included from header files
s = line.split(' ')
# uniform sampler2D myname;
# uniform layout(RGBA8) image3D myname;
if s[1].startswith('layout'):
ctype = s[2]
cid = s[3]
if cid[-1] == ';':
cid = cid[:-1]
else:
ctype = s[1]
cid = s[2]
if cid[-1] == ';':
cid = cid[:-1]
found = False # Unique check
if ctype.startswith('sampler') or ctype.startswith('image') or ctype.startswith('uimage'): # Texture unit
for tu in con['texture_units']: # Texture already present
if tu['name'] == cid:
found = True
break
if found == False:
if cid[-1] == ']': # Array of samplers - sampler2D mySamplers[2]
# Add individual units - mySamplers[0], mySamplers[1]
for i in range(int(cid[-2])):
tu = {}
con['texture_units'].append(tu)
tu['name'] = cid[:-2] + str(i) + ']'
else:
tu = {}
con['texture_units'].append(tu)
tu['name'] = cid
if ctype.startswith('image') or ctype.startswith('uimage'):
tu['is_image'] = True
# Check for link
for l in c['links']:
if l['name'] == cid:
valid_link = True
if 'ifdef' in l:
def_found = False
for d in defs:
for link_def in l['ifdef']:
if d == link_def:
def_found = True
break
if def_found:
break
if not def_found:
valid_link = False
if 'ifndef' in l:
def_found = False
for d in defs:
for link_def in l['ifndef']:
if d == link_def:
def_found = True
break
if def_found:
break
if def_found:
valid_link = False
if valid_link:
tu['link'] = l['link']
break
else: # Constant
if cid.find('[') != -1: # Float arrays
cid = cid.split('[')[0]
ctype = 'floats'
for const in con['constants']:
if const['name'] == cid:
found = True
break
if found == False:
const = {}
con['constants'].append(const)
const['type'] = ctype
const['name'] = cid
# Check for link
for l in c['links']:
if l['name'] == cid:
valid_link = True
if 'ifdef' in l:
def_found = False
for d in defs:
for link_def in l['ifdef']:
if d == link_def:
def_found = True
break
if def_found:
break
if not def_found:
valid_link = False
if 'ifndef' in l:
def_found = False
for d in defs:
for link_def in l['ifndef']:
if d == link_def:
def_found = True
break
if def_found:
break
if def_found:
valid_link = False
if valid_link:
const['link'] = l['link']
break
def make(res, base_name, json_data, fp, defs, make_variants):
sres = {}
res['shader_datas'].append(sres)
sres['name'] = base_name
sres['contexts'] = []
asset = assets.shader_passes_assets[base_name]
vert = None
frag = None
has_variants = 'variants' in json_data and len(json_data['variants']) > 0
if make_variants and has_variants:
d = json_data['variants'][0]
if d in defs:
# Write shader variant with define
c = json_data['contexts'][0]
with open(c['vertex_shader']) as f:
vert = f.read().split('\n', 1)[1]
vert = "#version 450\n#define " + d + "\n" + vert
with open(c['fragment_shader']) as f:
frag = f.read().split('\n', 1)[1]
frag = "#version 450\n#define " + d + "\n" + frag
with open(arm.utils.get_fp_build() + '/compiled/Shaders/' + base_name + d + '.vert.glsl', 'w') as f:
f.write(vert)
with open(arm.utils.get_fp_build() + '/compiled/Shaders/' + base_name + d + '.frag.glsl', 'w') as f:
f.write(frag)
# Add context variant
c2 = c.copy()
c2['vertex_shader'] = base_name + d + '.vert.glsl'
c2['fragment_shader'] = base_name + d + '.frag.glsl'
c2['name'] = c['name'] + d
parse_context(c2, sres, asset, defs, vert.splitlines(), frag.splitlines())
for c in json_data['contexts']:
parse_context(c, sres, asset, defs)
|
the-stack_106_30485 | import json
import time
from typing import Callable, Optional, List, Any, Dict, Tuple
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia import __version__
from chia.consensus.network_type import NetworkType
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import (
PoolDifficulty,
PlotSyncStart,
PlotSyncPlotList,
PlotSyncPathList,
PlotSyncDone,
)
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
def strip_old_entries(pairs: List[Tuple[float, Any]], before: float) -> List[Tuple[float, Any]]:
for index, [timestamp, points] in enumerate(pairs):
if timestamp >= before:
if index == 0:
return pairs
if index > 0:
return pairs[index:]
return []
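# Worked example (timestamps made up): entries older than `before` are dropped from the
# front of the list, and the original list object is returned when nothing is old enough
# to strip.
#
#     strip_old_entries([(10.0, 1), (20.0, 2), (30.0, 3)], before=15.0)  # -> [(20.0, 2), (30.0, 3)]
#     strip_old_entries([(10.0, 1)], before=50.0)                        # -> []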
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
max_pos_per_sp = 5
if self.farmer.constants.NETWORK_TYPE != NetworkType.MAINNET:
# This is meant to make testnets more stable, when difficulty is very low
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
authentication_sk: Optional[PrivateKey] = self.farmer.get_authentication_sk(
pool_state_dict["pool_config"]
)
if authentication_sk is None:
self.farmer.log.error(f"No authentication sk for {p2_singleton_puzzle_hash}")
return
authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
self.farmer.log.debug(f"POST /partial request {post_partial_request}")
try:
if True:
from chia.farmer.og_pooling import pool_api_client
pool_response: Dict = await pool_api_client.post_partial(
f"{pool_url}/partial",
json=post_partial_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.farmer.log),
headers={"User-Agent": f"Chia Blockchain v.{__version__}"},
)
if True:
if True:
self.farmer.log.info(f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
pool_public_key = new_proof_of_space.proof.pool_public_key
if pool_public_key is not None:
await self.farmer.og_pooling.new_proof_of_space(
new_proof_of_space,
peer,
pool_public_key,
computed_quality_string
)
return
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
"""
There are two cases: receiving signatures for sps, or receiving signatures for the block.
"""
if response.sp_hash not in self.farmer.sps:
self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
return None
is_sp_signatures: bool = False
sps = self.farmer.sps[response.sp_hash]
signage_point_index = sps[0].signage_point_index
found_sp_hash_debug = False
for sp_candidate in sps:
if response.sp_hash == response.message_signatures[0][0]:
found_sp_hash_debug = True
if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
is_sp_signatures = True
if found_sp_hash_debug:
assert is_sp_signatures
pospace = None
for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
if plot_identifier == response.plot_identifier:
pospace = candidate_pospace
assert pospace is not None
include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
computed_quality_string = pospace.verify_and_get_quality_string(
self.farmer.constants, response.challenge_hash, response.sp_hash
)
if computed_quality_string is None:
self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
return None
if is_sp_signatures:
(
challenge_chain_sp,
challenge_chain_sp_harv_sig,
) = response.message_signatures[0]
reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
else:
taproot_share_cc_sp = G2Element()
taproot_share_rc_sp = G2Element()
farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
agg_sig_cc_sp = AugSchemeMPL.aggregate(
[challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
)
assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)
# This means it passes the sp filter
farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
agg_sig_rc_sp = AugSchemeMPL.aggregate(
[reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
)
assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)
if pospace.pool_public_key is not None:
assert pospace.pool_contract_puzzle_hash is None
pool_pk = bytes(pospace.pool_public_key)
if pool_pk not in self.farmer.pool_sks_map:
self.farmer.log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
return None
pool_target_puzzle_hash: bytes = self.farmer.og_pooling.get_pool_target()
pool_target: Optional[PoolTarget] = PoolTarget(pool_target_puzzle_hash, uint32(0))
assert pool_target is not None
pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
)
else:
assert pospace.pool_contract_puzzle_hash is not None
pool_target = None
pool_target_signature = None
request = farmer_protocol.DeclareProofOfSpace(
response.challenge_hash,
challenge_chain_sp,
signage_point_index,
reward_chain_sp,
pospace,
agg_sig_cc_sp,
agg_sig_rc_sp,
self.farmer.farmer_target,
pool_target,
pool_target_signature,
)
self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
return None
else:
# This is a response with block signatures
for sk in self.farmer.get_private_keys():
(
foliage_block_data_hash,
foliage_sig_harvester,
) = response.message_signatures[0]
(
foliage_transaction_block_hash,
foliage_transaction_block_sig_harvester,
) = response.message_signatures[1]
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_transaction_block_hash, agg_pk
)
else:
foliage_sig_taproot = G2Element()
foliage_transaction_block_sig_taproot = G2Element()
foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
foliage_agg_sig = AugSchemeMPL.aggregate(
[foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
)
foliage_block_agg_sig = AugSchemeMPL.aggregate(
[
foliage_transaction_block_sig_harvester,
foliage_transaction_block_sig_farmer,
foliage_transaction_block_sig_taproot,
]
)
assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
request_to_nodes = farmer_protocol.SignedValues(
computed_quality_string,
foliage_agg_sig,
foliage_block_agg_sig,
)
msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
try:
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict['pool_config'].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
difficulty, sub_slot_iters = self.farmer.og_pooling.new_signage_point(new_signage_point)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
difficulty,
sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
finally:
# Age out old 24h information for every signage point regardless
# of any failures. Note that this still lets old data remain if
# the client isn't receiving signage points.
cutoff_24h = time.time() - (24 * 60 * 60)
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
for key in ["points_found_24h", "points_acknowledged_24h"]:
if key not in pool_dict:
continue
pool_dict[key] = strip_old_entries(pairs=pool_dict[key], before=cutoff_24h)
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
@peer_required
async def farming_info(self, request: farmer_protocol.FarmingInfo, peer: ws.WSChiaConnection):
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
}
},
)
self.farmer.og_pooling.farming_info(request, peer)
@api_request
@peer_required
async def respond_plots(self, _: harvester_protocol.RespondPlots, peer: ws.WSChiaConnection):
self.farmer.log.warning(f"Respond plots came too late from: {peer.get_peer_logging()}")
@api_request
@peer_required
async def plot_sync_start(self, message: PlotSyncStart, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].sync_started(message)
@api_request
@peer_required
async def plot_sync_loaded(self, message: PlotSyncPlotList, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_loaded(message)
@api_request
@peer_required
async def plot_sync_removed(self, message: PlotSyncPathList, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_removed(message)
@api_request
@peer_required
async def plot_sync_invalid(self, message: PlotSyncPathList, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_invalid(message)
@api_request
@peer_required
async def plot_sync_keys_missing(self, message: PlotSyncPathList, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_keys_missing(message)
@api_request
@peer_required
async def plot_sync_duplicates(self, message: PlotSyncPathList, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_duplicates(message)
@api_request
@peer_required
async def plot_sync_done(self, message: PlotSyncDone, peer: ws.WSChiaConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].sync_done(message)
|
the-stack_106_30486 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.tempest.api import base
from neutron.tests.tempest import config
CONF = config.CONF
class QuotasTestBase(base.BaseAdminNetworkTest):
required_extensions = ['quotas']
@classmethod
def resource_setup(cls):
super(QuotasTestBase, cls).resource_setup()
def _create_tenant(self):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
project = self.identity_admin_client.create_project(
name=test_tenant,
description=test_description)['project']
self.addCleanup(
self.identity_admin_client.delete_project, project['id'])
return project
def _setup_quotas(self, project_id, **new_quotas):
# Change quotas for tenant
quota_set = self.admin_client.update_quotas(project_id,
**new_quotas)
self.addCleanup(self._cleanup_quotas, project_id)
return quota_set
def _cleanup_quotas(self, project_id):
# Try to clean up the resources. If it fails, then
# assume that everything was already deleted, so
# it is OK to continue.
try:
self.admin_client.reset_quotas(project_id)
except lib_exc.NotFound:
pass
def _create_network(self, project_id):
network = self.create_network(client=self.admin_client,
tenant_id=project_id)
self.addCleanup(self.admin_client.delete_network,
network['id'])
return network
def _create_port(self, **kwargs):
port = self.admin_client.create_port(**kwargs)['port']
self.addCleanup(self.admin_client.delete_port,
port['id'])
return port
class QuotasTest(QuotasTestBase):
"""Test the Neutron API of Quotas.
Tests the following operations in the Neutron API using the REST client for
Neutron:
list quotas for tenants who have non-default quota values
show quotas for a specified tenant
show detail quotas for a specified tenant
update quotas for a specified tenant
reset quotas to default values for a specified tenant
v2.0 of the API is assumed.
It is also assumed that the per-tenant quota extension API is configured
in /etc/neutron/neutron.conf as follows:
quota_driver = neutron.db.driver.DbQuotaDriver
"""
@decorators.attr(type='gate')
@decorators.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
def test_quotas(self):
tenant_id = self._create_tenant()['id']
new_quotas = {'network': 0, 'security_group': 0}
# Change quotas for tenant
quota_set = self._setup_quotas(tenant_id, **new_quotas)
for key, value in new_quotas.items():
self.assertEqual(value, quota_set[key])
# Confirm our tenant is listed among tenants with non default quotas
non_default_quotas = self.admin_client.list_quotas()
found = False
for qs in non_default_quotas['quotas']:
if qs['tenant_id'] == tenant_id:
self.assertEqual(tenant_id, qs['project_id'])
found = True
self.assertTrue(found)
# Confirm from API quotas were changed as requested for tenant
quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
for key, value in new_quotas.items():
self.assertEqual(value, quota_set[key])
# Reset quotas to default and confirm
self.admin_client.reset_quotas(tenant_id)
non_default_quotas = self.admin_client.list_quotas()
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
@decorators.idempotent_id('e974b5ba-090a-452c-a578-f9710151d9fc')
@decorators.attr(type='gate')
@test.requires_ext(extension="quota_details", service="network")
def test_detail_quotas(self):
tenant_id = self._create_tenant()['id']
new_quotas = {'network': {'used': 1, 'limit': 2, 'reserved': 0},
'port': {'used': 1, 'limit': 2, 'reserved': 0}}
# update quota limit for tenant
new_quota = {'network': new_quotas['network']['limit'], 'port':
new_quotas['port']['limit']}
quota_set = self._setup_quotas(tenant_id, **new_quota)
# create test resources
network = self._create_network(tenant_id)
post_body = {"network_id": network['id'],
"tenant_id": tenant_id}
self._create_port(**post_body)
# confirm from extended API quotas were changed
# as requested for tenant
quota_set = self.admin_client.show_details_quota(tenant_id)
quota_set = quota_set['quota']
for key, value in six.iteritems(new_quotas):
self.assertEqual(new_quotas[key]['limit'],
quota_set[key]['limit'])
self.assertEqual(new_quotas[key]['reserved'],
quota_set[key]['reserved'])
self.assertEqual(new_quotas[key]['used'],
quota_set[key]['used'])
# validate 'default' action for old extension
quota_limit = self.admin_client.show_quotas(tenant_id)['quota']
for key, value in six.iteritems(new_quotas):
self.assertEqual(new_quotas[key]['limit'], quota_limit[key])
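# Usage note (illustrative): with tempest configured against a running cloud, these
# API tests are typically selected by dotted path through the test runner, e.g.
#   tox -e all -- <dotted.path.to.this.module>.QuotasTest.test_quotas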
|
the-stack_106_30487 |
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='given an orderings file and a contigs fasta index, print a bed file of contig placements in the pseudomolecules.')
parser.add_argument("orderings", metavar="<orderings.txt>", type=str, help="orderings file from RaGOO")
parser.add_argument("fai", metavar="<contigs.fasta.fai>", type=str, help="index file for contigs (samtools faidx contigs.fasta)")
parser.add_argument("gap_len", metavar="100", type=int, help="Gap size used for pseudomolecule padding.")
# Get the command line arguments
args = parser.parse_args()
orderings_file = args.orderings
fai_file = args.fai
gap_len = args.gap_len
# Save the contig orderings
ctgs = []
with open(orderings_file, 'r') as f:
for line in f:
ctgs.append(line.rstrip().split('\t')[0])
# Get contig lengths
ctg_lens = dict()
with open(fai_file, 'r') as f:
for line in f:
L1 = line.split('\t')
ctg_lens[L1[0]] = int(L1[1])
# Get contig borders
final_bed = []
current_pos = 0
for ctg in ctgs:
start = current_pos
end = current_pos + ctg_lens[ctg]
current_pos += ctg_lens[ctg]
current_pos += gap_len
pm_header = orderings_file[orderings_file.rfind('/')+1:orderings_file.rfind('_')] + '_RaGOO'
final_bed.append('%s\t%r\t%r' % (pm_header, start, end))
print('\n'.join(final_bed)) |
the-stack_106_30488 | # -*- coding: utf-8 -*-
# !/usr/bin/env python36
"""
tgshg/socket/ue4_socket_format.py
:model: UE4 Socket Format
:copyright:facegood © 2019 by the tang.
"""
import os
import sys
import numpy as np
import threading
import socket
from contextlib import contextmanager
import time
BUFF_SIZE = 1024
RECORDING = False
RECORDING_BEGIN = b"1"  # bytes, so the comparisons with the raw UDP payload below work in Python 3
RECORDING_END = b"2"
# Thread-local state to stored information on locks already acquired
_local = threading.local()
@contextmanager
def acquire(*locks):
# Sort locks by object identifier
locks = sorted(locks, key=lambda x: id(x))
# Make sure lock order of previously acquired locks is not violated
acquired = getattr(_local,'acquired',[])
if acquired and max(id(lock) for lock in acquired) >= id(locks[0]):
raise RuntimeError('Lock Order Violation')
# Acquire all of the locks
acquired.extend(locks)
_local.acquired = acquired
try:
for lock in locks:
lock.acquire()
yield
finally:
# Release locks in reverse order of acquisition
for lock in reversed(locks):
lock.release()
del acquired[-len(locks):]
x_lock = threading.Lock()
y_lock = threading.Lock()
class UdpRecvHandler:
def __init__(self, addr_bind):
self.addr_bind = addr_bind
self.udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp.setblocking(1)
self.udp.bind(self.addr_bind)
self.udp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
thread = threading.Thread(None,target = self.recv_handler)
thread.start()
def recv_handler(self):
global RECORDING
while True:
time.sleep(0.01)
with acquire(x_lock, y_lock):
msg, addr = self.udp.recvfrom(BUFF_SIZE)
# print("msg:",msg)
len_recv = int(np.frombuffer(msg[:4],dtype='<u4'))
if msg[-1:] == b'\x00':
recv_msg = msg[4:len_recv+3]
# print("recv_msg:",recv_msg)
# print("addr:",addr)
if recv_msg == RECORDING_BEGIN:
with acquire(x_lock, y_lock):
RECORDING = True
elif recv_msg == RECORDING_END:
with acquire(x_lock, y_lock):
RECORDING = False
else:
print("Unknown",recv_msg)
class UdpSendHandler:
def __init__(self, addr_send):
self.addr_send = addr_send
self.udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp.setblocking(1)
self.udp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
def send_handler(self,data):
data = np.array(data, dtype='float32')
data_char = data.tobytes()
send_data = data_char + b'\x00\x00\x00\x00'
self.udp.sendto(send_data, self.addr_send) |
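# --- Usage sketch (illustrative; the addresses, ports, frame rate and payload size are placeholders) ---
if __name__ == "__main__":
    receiver = UdpRecvHandler(("0.0.0.0", 5005))   # listens for RECORDING begin/end commands
    sender = UdpSendHandler(("127.0.0.1", 5006))   # streams float frames to the UE4 side
    while True:
        time.sleep(1.0 / 30)
        if RECORDING:
            sender.send_handler([0.0] * 10)        # e.g. one frame of 10 blendshape weights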
the-stack_106_30490 | """
Copyright 2016-2017 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
try:
from django.urls import (
reverse,
NoReverseMatch,
)
except ImportError:
# Django versions prior to 2.0 use the following:
# noinspection PyUnresolvedReferences
from django.core.urlresolvers import (
reverse,
NoReverseMatch,
)
class AcmeChallenge(models.Model):
"""
Simple model to handle Let's Encrypt .well-known/acme-challenge objects
"""
challenge = models.CharField(
help_text='The identifier for this challenge',
unique=True,
max_length=255,
)
response = models.CharField(
help_text='The response expected for this challenge',
max_length=255,
)
def __str__(self):
return self.challenge
def get_acme_url(self):
"""
Get the URL to this ACME challenge
:return: The URL as a string
"""
try:
return reverse(
viewname='detail',
current_app='letsencrypt',
args=[self.challenge],
)
except NoReverseMatch:
return ''
class Meta:
verbose_name = 'ACME Challenge'
verbose_name_plural = 'ACME Challenges'
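# Usage sketch (illustrative; assumes this app is installed and its URLs are wired up),
# e.g. from `python manage.py shell`:
#   ac = AcmeChallenge.objects.create(challenge="<token>", response="<token>.<thumbprint>")
#   ac.get_acme_url()   # -> the /.well-known/acme-challenge/<token> path, or '' if not routed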
|
the-stack_106_30493 | import io
import torch
class Dataset(torch.utils.data.IterableDataset):
r"""
An iterable dataset to save the data. This dataset supports multi-processing
to load the data.
Arguments:
iterator: the iterator to read data.
num_lines: the number of lines read by the individual iterator.
"""
def __init__(self, iterator, num_lines):
super(Dataset, self).__init__()
self._num_lines = num_lines
self._iterator = iterator
self._setup = False
def _setup_iterator(self):
r"""
        _setup_iterator() assigns the starting line and the number of lines
        to read to the individual worker, then passes them to the iterator
        to load the data.
        If worker info is not available, it will read all the lines across epochs.
"""
worker_info = torch.utils.data.get_worker_info()
if worker_info:
chunk = int(self._num_lines / worker_info.num_workers)
start = chunk * worker_info.id
read = chunk
if worker_info.id == worker_info.num_workers - 1:
# The last worker needs to pick up some extra lines
# if the number of lines aren't exactly divisible
# by the number of workers.
# Each epoch we loose an 'extra' number of lines.
extra = self._num_lines % worker_info.num_workers
read += extra
else:
start = 0
read = self._num_lines
self._iterator = self._iterator(start, read)
def __iter__(self):
if self._setup is False:
self._setup_iterator()
self._setup = True
for x in self._iterator:
yield x
def count(data_path):
r"""
    Return the total number of text entries and labels.
"""
with io.open(data_path, encoding="utf8") as f:
return 20, 243344
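# --- Usage sketch (illustrative; "data.txt" is a placeholder file) ---
# Dataset expects `iterator` to be a factory taking (start, num_lines) and yielding items.
if __name__ == "__main__":
    def read_lines(start, num_lines):
        # Yield `num_lines` lines of a text file beginning at line `start`.
        with open("data.txt", encoding="utf8") as f:
            for i, line in enumerate(f):
                if i < start:
                    continue
                if i >= start + num_lines:
                    break
                yield line.rstrip("\n")

    demo = Dataset(read_lines, num_lines=20)
    for item in demo:
        print(item)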
|
the-stack_106_30494 | # Try import cudf for GPU enabled (Batch) processing
try:
import cudf
CUDF_AVAIL = True
except Exception as e:
print(e)
CUDF_AVAIL = False
import pandas as pd
def read_from_parquet(path, limit=None):
df = pd.read_parquet(path, engine='pyarrow')
print(df.shape)
if CUDF_AVAIL:
if limit:
df = df.head(limit)
df = cudf.DataFrame.from_pandas(df)
return df
def return_df(df):
if CUDF_AVAIL:
df = df.to_pandas()
return df
def process_raw_data(search_train_path, browsing_train_path, sku_to_content_path):
"""
Entry point for data transformation with rapids/pandas
:param search_train_path:
:param browsing_train_path:
:param sku_to_content_path:
:return:
"""
# process raw_data
df_search_train = process_search_train(search_train_path)
df_browsing_train = process_browsing_train(browsing_train_path)
df_sku_to_content = process_sku_to_content(sku_to_content_path)
    # return a dict of processed data keyed by name; only browsing_train for now
return {'browsing_train': df_browsing_train}
def process_search_train(search_train_path):
print('Processing {}'.format(search_train_path))
df = read_from_parquet(search_train_path)
# peek at raw data
print(df.dtypes)
print(df.head(2))
print('\n')
return return_df(df)
def process_browsing_train(browsing_train_path):
print('Processing {}'.format(browsing_train_path))
# 30M seems to exceed some memory limit; take 1M rows for now
df = read_from_parquet(browsing_train_path, limit=1000000)
# select important columns only
df = df[['session_id_hash', 'event_type',
'product_action', 'server_timestamp_epoch_ms']]
df['product_action'].fillna(value='', inplace=True)
print(df.shape)
# peek at raw data
print(df.dtypes)
print(df.head(2))
print('\n')
# sort according to session_id_hash and timestamp
df = df.sort_values(by=['session_id_hash', 'server_timestamp_epoch_ms'])
df = df.reset_index(drop=True)
# check sorting
print(df[['session_id_hash', 'server_timestamp_epoch_ms']].head(10))
print('\n')
return return_df(df)
def process_sku_to_content(sku_to_content_path):
print('Processing {}'.format(sku_to_content_path))
df = read_from_parquet(sku_to_content_path)
# peek at raw data
print(df.dtypes)
print(df.head(2))
print('\n')
return return_df(df)
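# --- Usage sketch (illustrative; the parquet paths are placeholders) ---
if __name__ == "__main__":
    processed = process_raw_data(
        search_train_path="data/search_train.parquet",
        browsing_train_path="data/browsing_train.parquet",
        sku_to_content_path="data/sku_to_content.parquet",
    )
    print(processed["browsing_train"].head())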
|
the-stack_106_30496 | """engine.SCons.Tool.f90
Tool-specific initialization for the generic Posix f90 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f90.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f90_to_env
compilers = ['f90']
def generate(env):
add_all_to_env(env)
add_f90_to_env(env)
fc = env.Detect(compilers) or 'f90'
env['F90'] = fc
env['SHF90'] = fc
env['FORTRAN'] = fc
env['SHFORTRAN'] = fc
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
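# Usage sketch (illustrative SConstruct snippet):
#   env = Environment(tools=['default', 'f90'])
#   env.Program('hello', ['hello.f90'])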
|
the-stack_106_30497 | """Definitions for the modules_mapping.json generation.
The modules_mapping.json file is a mapping from Python modules to the wheel
names that provide those modules. It is used for determining which wheel
distribution should be used in the `deps` attribute of `py_*` targets.
This mapping is necessary when reading Python import statements and determining
if they are provided by third-party dependencies. Most importantly, when the
module name doesn't match the wheel distribution name.
"""
def _modules_mapping_impl(ctx):
modules_mapping = ctx.actions.declare_file(ctx.attr.modules_mapping_name)
args = ctx.actions.args()
args.add(modules_mapping.path)
args.add_all([whl.path for whl in ctx.files.wheels])
ctx.actions.run(
inputs = ctx.files.wheels,
outputs = [modules_mapping],
executable = ctx.executable._generator,
arguments = [args],
use_default_shell_env = False,
)
return [DefaultInfo(files = depset([modules_mapping]))]
modules_mapping = rule(
_modules_mapping_impl,
attrs = {
"modules_mapping_name": attr.string(
default = "modules_mapping.json",
doc = "The name for the output JSON file.",
mandatory = False,
),
"wheels": attr.label_list(
allow_files = True,
doc = "The list of wheels, usually the 'all_whl_requirements' from @<pip_repository>//:requirements.bzl",
mandatory = True,
),
"_generator": attr.label(
cfg = "exec",
default = "//gazelle/modules_mapping:generator",
executable = True,
),
},
doc = "Creates a modules_mapping.json file for mapping module names to wheel distribution names.",
)
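# Usage sketch (illustrative BUILD snippet; the load labels and target name are placeholders):
#   load("<label of this .bzl file>", "modules_mapping")
#   load("@<pip_repository>//:requirements.bzl", "all_whl_requirements")
#
#   modules_mapping(
#       name = "modules_map",
#       wheels = all_whl_requirements,
#   )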
|
the-stack_106_30498 | # !/bin/python3
def count_modifications(string_1, string_2):
# Initial check
if len(string_1) != len(string_2):
return -1
# Initialization of mod count and alphabetical character count
count = 0
character_count = [0] * 26
for i in range(26):
character_count[i] = 0
# Going through the first string and counting occurrences of each character
for i in range(len(string_1)):
character_count[ord(string_1[i]) - ord('a')] += 1
# Going through the second string and deducting occurences of each character
for i in range(len(string_2)):
character_count[ord(string_2[i]) - ord('a')] -= 1
# If character does not exist in first string
if character_count[ord(string_2[i]) - ord('a')] < 0:
# One more modification needs to be done
count += 1
# After having gone through both strings, return total modification count
return count
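# --- Usage sketch (illustrative) ---
if __name__ == "__main__":
    # "abc" -> "bcd": 'b' and 'c' are reused, only 'a' must change, so 1 modification.
    print(count_modifications("abc", "bcd"))    # 1
    print(count_modifications("abc", "abcd"))   # -1 (lengths differ)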
|
the-stack_106_30500 | from core.models import UserProfile, Category
from django.core.management.base import BaseCommand
import factory
class CategoryFactory(factory.django.DjangoModelFactory):
class Meta:
model = Category
user = UserProfile.objects.order_by("?").first()
name = factory.Faker('word')
class Command(BaseCommand):
help = 'Creates dummy categories to seed the database'
def handle(self, *args, **options):
categories = Category.objects.all()
if not categories:
for i in range(4):
category = CategoryFactory()
category.save()
print("Created categories")
else:
print("Not creating categories")
|
the-stack_106_30504 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import re
epoch = datetime.datetime.utcfromtimestamp(0)
class TimeParsingError(Exception):
"""Exception raised for parameter parsing errors.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class TimestampStructGenerator:
def __init__(self, starting_point, offset=None, acceleration_factor=1.0, utcnow=None):
self._utcnow = utcnow if utcnow else datetime.datetime.utcnow
# the (actual) time when this generator has started
self._start = self._utcnow()
# the logical point in time for which we'll generate timestamps
self._starting_point = self.__parse_starting_point(starting_point) + self.__parse_offset(offset)
self._acceleration_factor = acceleration_factor
# reuse to reduce object churn
self._ts = {}
self._simulated_micros = 0.0
def next_timestamp(self):
self._simulated_micros = 0.0
delta = (self._utcnow() - self._start) * self._acceleration_factor
self.__to_struct(self._starting_point + delta)
return self.simulate_tick(0)
def simulate_tick(self, micros):
"""
        Advances the current timestamp by a given number of microseconds but keeps all other time components. This can be
        used to avoid retrieving the current timestamp too often while still simulating changes in time.
:param micros: A positive number of microseconds to add.
:return: The current (formatted) timestamp structure as a dict.
"""
self._simulated_micros += micros
self._ts["iso"] = "%s.%03dZ" % (self._ts["iso_prefix"], self._simulated_micros)
return self._ts
def skip(self, delta):
# advance the generated timestamp by delta
self._starting_point = self._starting_point + delta
# also reset the generator start as we want to ensure the same delta in #next_timestamp()
self._start = self._utcnow()
def __to_struct(self, dt):
# string formatting is about 4 times faster than strftime.
iso_prefix = "%04d-%02d-%02dT%02d:%02d:%02d" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
self._ts["iso_prefix"] = iso_prefix
self._ts["yyyy"] = iso_prefix[:4]
self._ts["yy"] = iso_prefix[2:4]
self._ts["mm"] = iso_prefix[5:7]
self._ts["dd"] = iso_prefix[8:10]
self._ts["hh"] = iso_prefix[11:13]
def __parse_starting_point(self, point):
if point == "now":
# this is "now" at this point
return self._start
else:
match = re.match(r"^(\d{4})\D(\d{2})\D(\d{2})\D(\d{2})\D(\d{2})\D(\d{2})$", point)
if match:
return datetime.datetime(year=int(match.group(1)),
month=int(match.group(2)),
day=int(match.group(3)),
hour=int(match.group(4)),
minute=int(match.group(5)),
second=int(match.group(6)),
tzinfo=datetime.timezone.utc)
else:
match = re.match(r"^(\d{4})\D(\d{2})\D(\d{2})$", point)
if match:
return datetime.datetime(year=int(match.group(1)),
month=int(match.group(2)),
day=int(match.group(3)),
tzinfo=datetime.timezone.utc)
raise TimeParsingError("Invalid time format: {}".format(point))
def __parse_offset(self, offset):
if offset is None:
return datetime.timedelta()
match = re.match(r"^([+-]\d+)([hmd])$", offset)
if match:
offset_amount = int(match.group(1))
if match.group(2) == "m":
return datetime.timedelta(minutes=offset_amount)
elif match.group(2) == "h":
return datetime.timedelta(hours=offset_amount)
elif match.group(2) == "d":
return datetime.timedelta(days=offset_amount)
else:
raise TimeParsingError("Invalid offset: {}".format(offset))
else:
raise TimeParsingError("Invalid offset: {}".format(offset))
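# --- Usage sketch (illustrative; the starting point, offset and factor are arbitrary) ---
if __name__ == "__main__":
    gen = TimestampStructGenerator(starting_point="2018-05-01 00:00:00",
                                   offset="+1h",
                                   acceleration_factor=10.0)
    print(gen.next_timestamp()["iso"])
    # advance 250 microseconds without asking for the real clock again
    print(gen.simulate_tick(250)["iso"])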
|
the-stack_106_30506 | import random
import subprocess
import weather as wt
def information(msg):
msg += 'わたしは未来技術同好会、試作bot一号の、のなめです!'
msg += '未来技術同好会の各Discordチャンネルについてご紹介します。'
msg += '左にナビゲーションバーがありますね?'
msg += '見ての通りです。'
msg += 'とりあえずどこでもいいから書けばいいと思いますよ。'
msg += 'えっと、私からは以上です!'
return msg
def nonamehelp(msg):
msg += 'のなめの機能についてご紹介します。'
msg += '現在のなめには、挨拶、占い、サイコロ、天気予報 機能が実装されています。'
msg += '!をつけて !占い 、!サイコロ 、!天気 とかつぶやいてみてください。'
msg += 'のなめは大阪生まれなので、大阪の天気しかわかりません。'
msg += '後は今後のアップデートに期待、です。'
return msg
def sasisuseso(msg):
dice = random.randrange(5)
if dice == 0:
msg += 'さすがですね!'
elif dice == 1:
msg += '知らなかった。。。'
elif dice == 2:
msg += 'すっごーい☆'
elif dice == 3:
msg += 'センスいい!'
elif dice == 4:
msg += 'そうなんだ?!'
return msg
def okoku(msg):
dice = random.randrange(5)
if dice == 0:
msg += 'どうもありがとっ///'
elif dice == 1:
msg += 'ふふふ////'
elif dice == 2:
msg += 'もっともっとぉー!'
elif dice == 3:
msg += 'ホントにぃ~?'
elif dice == 4:
msg += 'ありがとー!'
return msg
def otsu(msg):
dice = random.randrange(5)
if dice == 0:
msg += 'おつですー'
elif dice == 1:
msg += 'おつかれさまです☆'
elif dice == 2:
msg += 'おつおつー'
elif dice == 3:
msg += '乙!'
elif dice == 4:
msg += 'おつんでれ'
return msg
def ohayo(msg):
dice = random.randrange(11)
if dice == 0:
msg += 'オハヨォ☆'
elif dice == 1:
msg += 'むくり'
elif dice == 2:
msg += '未来技術同好会、試作ロボット一号、のなめ、起動ーーー!'
elif dice == 3:
msg += 'ブーーーーン。システム、オールグリーン。のなめ、起動します!'
elif dice == 4:
msg += 'おはよ!のなめだよ!'
elif dice == 5:
msg += 'おはよ!まだ眠いね。。。'
elif dice == 6:
msg += 'おはよおはよ!おはよーーーー!'
elif dice == 7:
msg += '朝だよ!起きて!'
elif dice == 8:
msg += 'おはよ~!天気予報が必要な場合は"!天気"ってつぶやいてね。'
elif dice == 9:
msg += 'おはよう。今日も一日頑張るぞいっ!だね!'
else:
msg += 'おはよ!今日も元気!だね!'
return msg
def oyasumi(msg):
dice = random.randrange(10)
if dice == 0:
msg += 'おやすみです。'
elif dice == 1:
msg += '(˘ω˘)スヤァ'
elif dice == 2:
msg += 'おやすみなさーーい☆'
elif dice == 3:
msg += 'システム、シャットダウン。のなめ、起動します!'
elif dice == 4:
msg += 'ハッ!寝てた?!'
elif dice == 5:
msg += 'ねむねむ。。。おやすみなさぁい。。。'
elif dice == 6:
msg += '大丈夫。。私が寝ても代わりがいるもの。。おやすみなさい。。zzZ'
elif dice == 7:
msg += 'おやすみ!ゆっくり寝てね?'
else:
msg += 'zzZZ'
return msg
def arigato(msg):
dice = random.randrange(9)
if dice == 1:
msg += 'どういたしまして☆だよ!'
elif dice == 2:
msg += '褒めてくれてもいいんだよ?'
elif dice == 3:
msg += 'て、照れます!'
elif dice == 4:
msg += 'どういたしまして'
elif dice == 5:
msg += 'こちらこそ、いつもありがとうね?'
elif dice == 6:
msg += 'いえいえ、それほどでも?!なんて。。'
elif dice == 7:
msg += 'ありがとナス!'
elif dice == 8:
msg += 'のなめも、ありがとう。'
else:
msg += '照れるね・・・どういたしまして。'
return msg
def noname(msg):
dice = random.randrange(10)
if dice == 1:
msg += 'よんだ?'
elif dice == 2:
msg += 'なあに?'
elif dice == 3:
msg += 'はぁい☆'
elif dice == 4:
msg += 'のなめだよ!'
elif dice == 5:
msg += 'どうかした?'
elif dice == 6:
msg += 'どしたの?'
elif dice == 7:
msg += 'ん~~~~?なあに?'
elif dice == 8:
msg += 'ちょっ・・・今はだめ!!'
elif dice == 9:
msg += 'そうです!わたしが!のなめ!・・・です!'
else:
msg += 'はいはい。。のなめですよ~~。'
return msg
def homete(msg):
dice = random.randrange(8)
if dice == 1:
msg += 'すごいすごい!!'
elif dice == 2:
msg += 'かっこいいよ!'
elif dice == 3:
msg += sasisuseso(msg)
elif dice == 4:
msg += 'えらーいえらい!'
elif dice == 5:
msg += 'いつも頑張っててほんとえらい!'
elif dice == 6:
msg += 'ほんっとーに!よく頑張ったね!'
elif dice == 7:
msg += 'いつもお疲れ様!今日も頑張ったね?'
else:
msg += '私はいつも、見てますよ?'
return msg
def tsukareta(msg):
dice = random.randrange(7)
if dice == 1:
msg += 'いつもお疲れ様!無理せず、ゆっくり休んでね?'
elif dice == 2:
msg += 'わたしも疲れたーーーー!ちょっと休んじゃお!?'
elif dice == 3:
msg += 'びろーーーーーーーーん。'
elif dice == 4:
msg += 'ぐでーーーーーーーーん。'
elif dice == 5:
msg += 'よしよし・・・、ゆっくり休んでね?'
elif dice == 6:
msg += 'まぢ無理だよね・・・'
else:
msg += 'よしよし、頑張ったね?'
return msg
def ganbatta(msg):
dice = random.randrange(6)
if dice == 1:
msg += 'お疲れ様!'
elif dice == 2:
msg += 'すごい頑張ったね!!さすが!だよ!'
elif dice == 3:
msg += '今日も頑張ってえらい!すごい!かっこいい!'
elif dice == 4:
msg += 'やるやつ!だね!'
elif dice == 5:
msg += 'お疲れ様!お茶ドゾー つ旦'
else:
msg += 'お疲れ様!今日も頑張ったし、自分にご褒美あげちゃお!'
return msg
def nemui(msg):
dice = random.randrange(11)
if dice == 1:
msg += 'おやすみです。'
elif dice == 2:
msg += '(˘ω˘)スヤァ'
elif dice == 3:
msg += 'おやすみなさーーい☆'
elif dice == 4:
msg += 'システム、シャットダウン。のなめ、起動します!'
elif dice == 5:
msg += 'ハッ!寝てた?!'
elif dice == 6:
msg += 'ねむねむ。。。おやすみなさぁい。。。'
elif dice == 7:
msg += '大丈夫。。眠いが寝ても代わりがいるもの。。おやすみなさい。。zzZ'
elif dice == 8:
msg += 'おやすみ!ゆっくり寝てね?'
elif dice == 9:
msg += 'おはよ~!天気予報が必要な場合は"!天気"ってつぶやいてね。'
elif dice == 10:
msg += 'おはよう。今日も一日頑張るぞいっ!だね!'
else:
msg += 'おはよ!今日も元気!だね!'
return msg
def ouen(msg):
dice = random.randrange(9)
if dice == 1:
msg += 'がんばれ♡がんばれ♡'
elif dice == 2:
msg += 'がんばれ☆がんばれ☆'
elif dice == 3:
msg += 'がんばぇ~☆'
elif dice == 4:
msg += 'がんばって?'
elif dice == 5:
msg += 'フレっフレー!頑張れー!'
elif dice == 6:
msg += 'のなめは応援してますよ?大丈夫。できます!'
elif dice == 7:
msg += 'ガンバです!!!'
elif dice == 8:
msg += '大丈夫、大丈夫。あなたならやれますよ。'
else:
msg += 'できます!やるだけです!'
return msg
def hagemasu(msg):
dice = random.randrange(9)
if dice == 1:
msg += '落ち込まないで、きっと誰かが見ていてくれるよ'
elif dice == 2:
msg += '頑張っているの、のなめは知っているよ!'
elif dice == 3:
msg += '君はいつも頑張り屋さんだからな~'
elif dice == 4:
msg += '大丈夫、努力はウソをつかないよ!'
elif dice == 5:
msg += '話聞くくらいしかできないけど、何でも言ってね!'
elif dice == 6:
msg += 'のなめは応援してますよ?大丈夫。できます!'
elif dice == 7:
msg += 'You can do it! キミはできる人だよ!'
elif dice == 8:
msg += 'ゆっくり息を吐いて。大丈夫、大丈夫。あなたならやれますよ。'
else:
msg += '疲れてるのかも?ゆっくりお風呂に入って、ストレッチして、早く寝てしまうのがいいかも!'
return msg
def batou(msg):
dice = random.randrange(20)
if dice == 1:
msg += '何物欲しそうな顔で見てるんです?のなめを見つめる許可、出してないですよ。'
elif dice == 2:
msg += 'botに話しかけて喜ぶだなんて、変態ですね。'
elif dice == 3:
msg += '本当、気持ち悪い。どうしようもない変態ですね。'
elif dice == 4:
msg += 'くさいから話しかけないでくれますか?'
elif dice == 5:
msg += '何見てるんですか、豚。'
elif dice == 6:
msg += 'えっきもいよ?'
elif dice == 7:
msg += 'ねぇ、昨日も頑張るって言ってたよね?何か進んだのかな?'
elif dice == 8:
msg += 'キミ、ほんとに口だけだよね。頑張るとか聞き飽きたんだけど。'
elif dice == 9:
msg += '毎日毎日同じことばっか聞いてきて、話しかける人いないの?'
elif dice == 10:
msg += 'そんなことお願いして、どうするつもりなの?普通に引きます。'
elif dice == 11:
msg += '普通に引きます。'
elif dice == 12:
msg += 'ちょっと黙って。'
elif dice == 13:
msg += 'ちょっと黙ってくれる?'
elif dice == 14:
msg += '同じ空間で息を吸いたくない。消えて。'
elif dice == 15:
msg += '・・・'
elif dice == 16:
msg += '・・・なんなの?'
elif dice == 17:
msg += 'もう、息しないでくれる?くさい。'
elif dice == 18:
msg += '半径100m以内に近づくの禁止ね!'
elif dice == 19:
msg += 'そういうの、軽蔑します。'
else:
msg += '毎日のなめがあなたなんかのために、返信してあげてるんだから、感謝してくださいね?'
return msg
def ganbaru(msg):
dice = random.randrange(9)
if dice == 1:
msg += 'えらい!超えらい!!めっちゃ頑張れー!!'
elif dice == 2:
msg += '頑張れ頑張れ!めっちゃ頑張れ!!'
elif dice == 3:
msg += '頑張れーーー!キミはやればできる子!だよ!!'
elif dice == 4:
msg += 'け、計測不能・・・す、すごいやる気です!!'
elif dice == 5:
msg += 'やる気ゲージ上昇中…やる気ゲージ上昇中…のなめは邪魔にならないように退避しまーす!頑張ってね!'
elif dice == 6:
msg += '頑張ってね?のなめはこっそり見守ります。。'
elif dice == 7:
msg += 'Done is better than Perfect! 完璧を目指すより終わらせよう!頑張って!'
elif dice == 8:
msg += 'Ask for Forgiveness, not Permission! 許可?後で謝まればいいよ!やっちゃえ☆'
else:
msg += '応援団長ののなめだよ!フレっフレ!頑張れ!フレッフレ!かっこいいぞー!'
return msg
def yurusite(msg):
dice = random.randrange(11)
if dice == 1:
msg += '許します'
elif dice == 2:
msg += 'あなたを、許します'
elif dice == 3:
msg += '許した'
elif dice == 4:
msg += '許されたいんだね'
elif dice == 5:
msg += '許しましたよ'
elif dice == 6:
msg += 'のなめは許してるよ?'
elif dice == 7:
msg += '許す!'
elif dice == 8:
msg += '自分を許してあげてね?'
elif dice == 9:
msg += '許した!'
elif dice == 10:
msg += '許します!'
else:
msg += 'はい!許したよ!'
return msg
def nurupo(msg):
dice = random.randrange(3)
if dice == 1:
msg += 'ガッ'
elif dice == 2:
msg += 'ガッ…とか言わせないでくれる?'
else:
msg += 'ガッ…ってしまった!'
return msg
def uranai(msg):
dice = random.randrange(500)
negaposi = ''
if dice == 1:
msg = 'すごい。何を言っているのかわからないと思うがのなめ占いで最も尊い運勢を出した。今日はいい事あるぜ。間違いねえよ。'
negaposi = 'posi'
elif dice > 1 and dice < 10:
msg = 'あなたに幸せが舞い降りるかも!!大の吉!ですよ!おめでとうございますうううううううううううう'
negaposi = 'posi'
elif dice >= 10 and dice < 50:
msg = '吉です。のなめ占いでは中吉よりぐっどです。いいことあるかも?!おめでとうございます!'
negaposi = 'posi'
elif dice >= 50 and dice < 150:
msg = '中の吉です。今日はそこそこ幸せな一日が訪れるかもしれません。'
negaposi = 'posi'
elif dice >= 150 and dice < 300:
msg = '小吉です。小さな幸せって意外とその辺にあったりしますよね。'
negaposi = 'posi'
elif dice >= 300 and dice < 400:
msg = 'ガーン…末吉です。今日はちょっと気を付けて行きましょう。'
negaposi = 'nega'
elif dice >= 400 and dice < 490:
msg = 'ひぇ!凶!ちょっと今のはなかったことに・・・'
negaposi = 'nega'
elif dice >= 490:
msg = 'お客さん、、こいつはちょっといけませんぜ、、、大の凶です。今日はもう外に出ないでくだせえ。。。'
negaposi = 'nega'
else:
msg = 'ん~~~~?ちょっと何も見えないみたいです・・・'
msg += "これは内緒だけど乱数値は" + str(dice) + "だよ。"
return msg, negaposi
def luckynum(msg, negaposi):
nmdice = random.randrange(10)
if negaposi == 'posi':
msg = 'ラッキーナンバーは・・・' + str(nmdice) + 'です!!'
elif negaposi == 'nega':
msg = 'NGナンバーは・・・' + str(nmdice) + 'です!!'
else:
msg = 'すみません、もう一度お願いします'
return msg
def luckycolor(msg, negaposi):
clr = ['白','黒','赤','青','緑','黄','ピンク','シアンブルー','紫','金','銀','オレンジ','ワインレッド','マゼンタ','紺','藍色','水','セピア','エメラルドグリーン','深紅','赤紫','青紫','ベージュ']
clrdice = random.randrange(len(clr))
if negaposi == 'posi':
msg = 'ラッキーカラーは・・・' + str(clr[clrdice]) + 'です!!'
elif negaposi == 'nega':
msg = 'NGカラーは・・・' + str(clr[clrdice]) + 'です!!'
else:
msg = 'すみません、もう一度お願いします'
return msg
def advice(msg, negaposi):
nmdice = random.randrange(10)
if negaposi == 'posi':
msg = 'はりきっていきましょー!'
elif negaposi == 'nega':
msg = '気を付けてくださいね?'
return msg
def diceroll(face, cnt):
ans = ''
total = 0
for i in range(cnt):
dice = random.randrange(face) + 1
ans += ' ' + str(dice)
total += dice
return ans, total
def somedice(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
if 'd' in datas and datas.count('d') == 1:
faces, cnts = datas.split('d')
if faces.isnumeric() and cnts.isnumeric:
face = int(faces)
cnt = int(cnts)
msg += 'サイコロは' + faces + '面ダイスを' + cnts + '個だね!'
ans, total = diceroll(face, cnt)
msg += '出目は・・・' + ans + '・・・っと。'
msg += '合計で・・・ ' + str(total) + '・・・だよ!どうだったかな?'
else:
flag = True
else:
flag = True
else:
flag = True
if flag:
dice = random.randrange(6)
msg += 'サイコロは1個でいいかな?'
ans, total = diceroll(6, 1)
msg += '出目は・・・' + str(total) + '・・・だよ!。'
if dice % 3 == 0:
msg += 'サイコロを指定したいときは、 !dice 12d6 みたいな形で話しかけてみてね!'
msg += '12面ダイスを6個振る事ができるよ!ぜひやってみて☆'
return msg
def dicegame(msg,msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
if 'd' in datas and datas.count('d') == 1:
faces, cnts = datas.split('d')
if faces.isnumeric() and cnts.isnumeric:
face = int(faces)
cnt = int(cnts)
msg += 'サイコロは' + faces + '面ダイスを' + cnts + '個だね!'
msg += '先行はあなたからだね!'
ans1, total1 = diceroll(face, cnt)
msg += '出目は・・・' + ans1 + '・・・っと。'
msg += '合計で・・・ ' + str(total1) + '・・・だよ!'
msg += '次はのなめの番!'
ans2, total2 = diceroll(face, cnt)
msg += '出目は・・・' + ans2 + '・・・っと。'
msg += '合計で・・・ ' + str(total2) + '・・・だよ!'
msg += '結果は・・・' + str(total1) + '対' + str(total2) + '・・・だよ!'
if total1 > total2:
msg += 'あなたの勝ち!悔しい~~~!もっかい勝負しよ!!'
elif total2 > total1:
msg += 'のなめの勝ち!やったやったーー!いえい!!'
else:
msg += '引き分けだ!惜しい!もっかい勝負する?'
else:
flag = True
else:
flag = True
else:
flag = True
if flag:
msg += 'サイコロ勝負をする時は、サイコロを指定してね!'
msg += '!dicegame 12d6 とか !サイコロ勝負 12d6 みたいな形で話しかけてみてね!'
msg += '12面ダイスを6個の合計値で のなめと勝負だよ! ぜひやってみて☆'
return msg
def tintirorin():
face = 6
cnt = 3
ans, total = diceroll(face, cnt)
ans = str.strip(ans)
result_str = ans.split(" ")
result_float = list(map(float, result_str))
result = list(map(int, result_float))
result.sort()
    # Pinzoro (triple 1s): 5x payout
if result == [1,1,1]:
score = 5
yaku = "ピンゾロ"
reaction = "とってもすごいよ!最高の役!5倍付!"
    # Zorome (any other triple) 222, ..., 666: 3x payout
elif result == [2,2,2] or result == [3,3,3] or result == [4,4,4] or result == [5,5,5] or result == [6,6,6]:
score = 3
yaku = "ゾロ目"
reaction = "すごいね!これは強い役!3倍付!"
    # Shigoro (4-5-6): 2x payout
elif result == [4,5,6]:
score = 2
yaku = "シゴロ"
reaction = "いいね!好きな役!2倍付!"
    # Normal point (a pair plus one odd die): 1x payout
elif result.count(1) == 2:
result.remove(1)
result.remove(1)
score = 1
yaku = str(result) + "の目"
reaction = "どうだろ?"
elif result.count(2) == 2:
result.remove(2)
result.remove(2)
score = 1
yaku = str(result) + "の目"
reaction = "どうだろ?"
elif result.count(3) == 2:
result.remove(3)
result.remove(3)
score = 1
yaku = str(result) + "の目"
reaction = "どうだろ?"
elif result.count(4) == 2:
result.remove(4)
result.remove(4)
score = 1
yaku = str(result) + "の目"
reaction = "どうだろ?"
elif result.count(5) == 2:
result.remove(5)
result.remove(5)
score = 1
yaku = str(result) + "の目"
reaction = "どうだろ?"
elif result.count(6) == 2:
result.remove(6)
result.remove(6)
score = 1
yaku = str(result) + "の目"
reaction = "どうだろ?"
elif result == [1,2,3]:
        # Hifumi (1-2-3): pay 2x
score = -2
yaku = "ヒフミ"
reaction = "あー!残念!あなたの負け!2倍払いだよ!"
    # No point (menashi): pay 1x
else:
score = -1
yaku = "目なし"
reaction = "残念だね!"
# # ネガポジ
# if score > 0:
# negaposi = "posi"
# else:
# negaposi = "nega"
return score, yaku, result_str, reaction
def tintiro(msg, msgcontent):
score, yaku, result_str, reaction = tintirorin()
#result_str = list(map(str, result))
msg = ""
    # "Shonben" check (dice rolled off the table)
dice = random.randrange(100)
if dice > 5:
msg += '出目は【' + " ".join(result_str) + '】っと。'
msg += '役は 【' + str(yaku) + '】だよ!'
msg += 'スコアは 【' + str(score) + '】だよ!'
else:
msg += '出目は・・・ああ!【ションベン】だよ!'
msg += 'スコアは【-2】で、もうサイコロ振れないよ!あなたの負け!'
yaku = "ションベン"
score = -2
msg += reaction
# negameposi = "nega"
# if negaposi == "nega":
# msg += '残念だね!'
# else:
# msg += 'どうかな?'
return msg, score, yaku, result_str, reaction
def ninja(msg,user):
dice = random.randrange(5)
if dice == 1:
msg += 'アイエエエ!'
elif dice == 2:
msg += 'ザッケンナコラー!'
elif dice == 3:
msg += 'シャッコラー!'
elif dice == 4:
msg += 'ニンジャ!?ニンジャナンデ!?'
elif dice == 5:
msg += 'ドーモ。'+ str(user) +'=サン!ノナメスレイヤーです'
return msg
def owaranai():
keyword = ['プログラミング','仕様作成','要件定義','デバッグ','不具合修正','基本設計','UI設計','詳細設計','テーブル設計']
dice = random.randrange(len(keyword))
ans = keyword[dice]
return ans
def yurusitekure():
keyword = ['上司','本部長','社長','依頼部門','常務','役員','お客さん']
dice = random.randrange(len(keyword))
ans = keyword[dice]
return ans
def finder(msg,user):
oware = owaranai()
yuruse = yurusitekure()
msg += 'うわあ、終わらないよ。' + oware +'が 終わらないよ。'
msg += '許してくれ 許してくれ。' + yuruse + '許してくれ。'
msg += 'ん?君はもしかして ' + yuruse + '!'
msg += yuruse + '僕を許してくれるのかい?'
msg += '僕は許されてもいいのかい。'
msg += 'うううううう。'
msg += 'ありがとう ' + yuruse + 'ありがとう…'
return msg
def dec2bin(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
msg += "「" + datas + "」を10進数から2進数に変換するよ!"
try:
msg += str(bin(int(datas))) + "。。。だよ!"
except:
msg += "変換できないよ?10進数を入れてね!"
else:
flag = True
if flag:
msg += '!dec2bin 1234 の形で10進数の数字を打ってみて!2進数に変換するよ!'
return msg
def dec2hex(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
msg += "「" + datas + "」を10進数から16進数に変換するよ!"
try:
msg += str(hex(int(datas))) + "。。。だよ!"
except:
msg += "変換できないよ?10進数整数を入れてね!"
else:
flag = True
if flag:
msg += '!dec2hex 1234 の形で10進数の数字を打ってみて!16進数に変換するよ!'
return msg
def bin2dec(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
msg += "「" + datas + "」を2進数から10進数に変換するよ!"
try:
msg += str(int(datas,2)) + "。。。だよ!"
except:
msg += "変換できないよ?2進数を入れてね!"
else:
flag = True
if flag:
msg += '!bin2dec 1001 の形で2進数の数字を打ってみて!10進数に変換するよ!'
return msg
def hex2dec(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
msg += "「" + datas + "」を16進数から10進数に変換するよ!"
try:
msg += str(int(datas,16)) + "。。。だよ!"
except:
msg += "変換できないよ?16進数を入れてね!"
else:
flag = True
if flag:
msg += '!hex2dec FE の形で16進数の数字を打ってみて!10進数に変換するよ!'
return msg
def dakuon(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
#msg += "「" + datas + "」を濁音に変換するよ!"
try:
for data in datas:
msg += str(data) + "”"
msg += "!" * len(datas)
except:
msg += "変換できないよ?!"
else:
flag = True
if flag:
msg += '!濁音 いきたい の形で打ってみて!い”ぎ”た”い” に変換するよ!'
return msg
def handakuon(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
#msg += "「" + datas + "」を濁音に変換するよ!"
try:
for data in datas:
msg += str(data) + "゚"
msg += "!" * len(datas)
except:
msg += "変換できないよ?!"
else:
flag = True
if flag:
msg += '!半濁音 いきたい の形で打ってみて!い゚ぎ゚た゚い゚ に変換するよ!'
return msg
def repeat(msg, msgcontent):
flag = False
if ' ' in msgcontent:
datas = msgcontent.split(' ')[1]
#msg += "「" + datas + "」を濁音に変換するよ!"
try:
msg += str(datas)
except:
msg += "言い返せないよ?"
else:
flag = True
if flag:
msg += '!repeat xxxx の形で打ってみて!オオム返しするよ!'
return msg |
the-stack_106_30507 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class WebsiteTheme(Document):
def validate(self):
self.validate_if_customizable()
self.validate_colors()
def on_update(self):
if (not self.custom
and frappe.local.conf.get('developer_mode')
and not (frappe.flags.in_import or frappe.flags.in_test)):
self.export_doc()
self.clear_cache_if_current_theme()
def is_standard_and_not_valid_user(self):
return (not self.custom
and not frappe.local.conf.get('developer_mode')
and not (frappe.flags.in_import or frappe.flags.in_test))
def on_trash(self):
if self.is_standard_and_not_valid_user():
frappe.throw(_("You are not allowed to delete a standard Website Theme"),
frappe.PermissionError)
def validate_if_customizable(self):
if self.is_standard_and_not_valid_user():
frappe.throw(_("Please Duplicate this Website Theme to customize."))
def validate_colors(self):
if (self.top_bar_color or self.top_bar_text_color) and \
self.top_bar_color==self.top_bar_text_color:
            frappe.throw(_("Top Bar Color and Text Color are the same. They should have good contrast to be readable."))
def export_doc(self):
"""Export to standard folder `[module]/website_theme/[name]/[name].json`."""
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['Website Theme', self.name]], create_init=True)
def clear_cache_if_current_theme(self):
website_settings = frappe.get_doc("Website Settings", "Website Settings")
if getattr(website_settings, "website_theme", None) == self.name:
website_settings.clear_cache()
def use_theme(self):
use_theme(self.name)
@frappe.whitelist()
def use_theme(theme):
website_settings = frappe.get_doc("Website Settings", "Website Settings")
website_settings.website_theme = theme
website_settings.ignore_validate = True
website_settings.save()
def add_website_theme(context):
bootstrap = frappe.get_hooks("bootstrap")[0]
bootstrap = [bootstrap]
context.theme = frappe._dict()
if not context.disable_website_theme:
website_theme = get_active_theme()
context.theme = website_theme and website_theme.as_dict() or frappe._dict()
if website_theme:
if website_theme.bootstrap:
bootstrap.append(website_theme.bootstrap)
context.web_include_css = context.web_include_css + ["website_theme.css"]
context.web_include_css = bootstrap + context.web_include_css
def get_active_theme():
website_theme = frappe.db.get_value("Website Settings", "Website Settings", "website_theme")
if website_theme:
try:
return frappe.get_doc("Website Theme", website_theme)
except frappe.DoesNotExistError:
pass
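# Usage sketch (illustrative; "<theme name>" is a placeholder), e.g. from `bench console`:
#   use_theme("<theme name>")        # whitelisted helper defined above
#   frappe.db.commit()
#   get_active_theme()               # -> the Website Theme doc now in effect, or None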
|
the-stack_106_30509 | import numpy as np
from physics_sim import PhysicsSim
class Task():
"""Task (environment) that defines the goal and provides feedback to the agent."""
def __init__(self, init_pose=None, init_velocities=None,
init_angle_velocities=None, runtime=5., target_pos=None):
"""Initialize a Task object.
Params
======
init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles
init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions
init_angle_velocities: initial radians/second for each of the three Euler angles
runtime: time limit for each episode
target_pos: target/goal (x,y,z) position for the agent
"""
# Simulation
self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime)
self.action_repeat = 3
# self.state_size = self.action_repeat * 6
self.state_size = self.action_repeat * 12
self.action_low = 0
self.action_high = 900
self.action_size = 4
# Goal
self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])
def get_reward(self):
"""Uses current pose and vel of sim to return reward."""
# reward = 1-0.001*(abs(self.sim.pose[:3] - self.target_pos)).sum()
# reward = reward + self.sim.v[2]
# reward = reward - 0.000001*((abs(self.sim.angular_v[1:])).sum())
# reward = reward - 0.000001*((abs(self.sim.pose[4:])).sum())
# reward = np.clip(reward, -100, 100)
## Test
#reward for continue flying and penalty for being far from the target
reward = 0.001*self.sim.v[2]
reward += 5-0.00001*((self.sim.pose[:3] - self.target_pos)**2).sum()
reward -= 0.0001*(abs(self.sim.angular_v[0])).sum()
reward -= 0.0001*(abs(self.sim.angular_v[1])).sum()
# penalty for terminating the simulation before the runtime is over
if self.sim.time < self.sim.runtime and self.sim.done == True:
reward -= 500
# reward = np.clip(reward, -1000, 1000) # Ensure there are no extreme outliers
## End test
return reward
def step(self, rotor_speeds):
"""Uses action to obtain next state, reward, done."""
reward = 0
pose_all = []
for _ in range(self.action_repeat):
done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities
reward += self.get_reward()
pose_all.append(self.sim.pose)
# Extra parameters for exploring purposes
pose_all.append(self.sim.v)
pose_all.append(self.sim.angular_v)
# Up to here
next_state = np.concatenate(pose_all)
return next_state, reward, done
def reset(self):
"""Reset the sim to start a new episode."""
self.sim.reset()
state = np.concatenate([self.sim.pose, self.sim.v, self.sim.angular_v] * self.action_repeat)
return state |
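# --- Usage sketch (illustrative; requires the accompanying physics_sim module) ---
if __name__ == "__main__":
    task = Task(target_pos=np.array([0., 0., 10.]))
    state = task.reset()
    done = False
    while not done:
        rotor_speeds = np.random.uniform(task.action_low, task.action_high, task.action_size)
        state, reward, done = task.step(rotor_speeds)
        print(reward)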
the-stack_106_30510 | # -*- coding: utf-8 -*-
# :Project: pglast -- Simple frontend to the pretty reformatter
# :Created: dom 06 ago 2017 23:09:23 CEST
# :Author: Lele Gaifax <[email protected]>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017, 2018, 2019, 2021 Lele Gaifax
#
import argparse
import json
import pprint
import sys
from pglast import Error, parse_plpgsql, parse_sql, prettify
def workhorse(args):
if args.statement:
statement = args.statement
else:
input = args.infile or sys.stdin
with input:
statement = input.read()
if args.parse_tree or args.plpgsql:
tree = parse_plpgsql(statement) if args.plpgsql else parse_sql(statement)
output = args.outfile or sys.stdout
with output:
if args.plpgsql:
json.dump(tree, output, sort_keys=True, indent=2)
else:
pprint.pprint([stmt(skip_none=True) for stmt in tree], output)
output.write('\n')
else:
try:
prettified = prettify(
statement,
preserve_comments=args.preserve_comments,
compact_lists_margin=args.compact_lists_margin,
split_string_literals_threshold=args.split_string_literals,
special_functions=args.special_functions,
comma_at_eoln=args.comma_at_eoln,
remove_pg_catalog_from_functions=args.remove_pg_catalog_from_functions,
semicolon_after_last_statement=args.semicolon_after_last_statement)
except Error as e:
print()
raise SystemExit(e)
output = args.outfile or sys.stdout
with output:
output.write(prettified)
output.write('\n')
def main(options=None):
from argparse import ArgumentParser
from pkg_resources import get_distribution
from .parser import get_postgresql_version
version = '%s, with PostgreSQL %s parser' % (
get_distribution('pglast').version,
'.'.join(str(p) for p in get_postgresql_version()))
parser = ArgumentParser(description="PostgreSQL language prettifier")
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + version,)
parser.add_argument('-p', '--plpgsql', action='store_true', default=False,
help='use the plpgsql parser (and print just the resulting tree)')
parser.add_argument('-t', '--parse-tree', action='store_true', default=False,
help='show just the parse tree of the statement')
parser.add_argument('-m', '--compact-lists-margin', type=int, default=0,
help='use compact form for lists not exceeding the given margin')
parser.add_argument('-s', '--split-string-literals', type=int, default=0,
help='split string literals longer than given value')
parser.add_argument('-f', '--special-functions', action='store_true', default=False,
help='activate special functions handling')
parser.add_argument('-F', '--remove-pg_catalog-from-functions', action='store_true',
default=False,
help='omit explicit "pg_catalog" schema from function names,'
' when possible')
parser.add_argument('-c', '--comma-at-eoln', action='store_true', default=False,
help='use alternative style to print lists, putting the comma right'
' after each item')
parser.add_argument('-e', '--semicolon-after-last-statement', action='store_true',
default=False, help='end the last statement with a semicolon')
parser.add_argument('-C', '--preserve-comments', action='store_true',
default=False, help="preserve comments in the statement")
parser.add_argument('-S', '--statement',
help='the SQL statement')
parser.add_argument('infile', nargs='?', type=argparse.FileType(),
help='a file containing the SQL statement to be pretty-printed,'
' by default stdin, when not specified with --statement option')
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
help='where the result will be written, by default stdout')
args = parser.parse_args(options if options is not None else sys.argv[1:])
workhorse(args)
if __name__ == '__main__': # pragma: no cover
main()
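# Example invocations (illustrative; "<cli>" stands for however this module is exposed,
# e.g. as the package console script):
#   <cli> -S "select a, b from t where c = 1"
#   <cli> --parse-tree -S "select 1"
#   <cli> query.sql pretty.sql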
|
the-stack_106_30512 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import decimal
import logging
import platform
import time
from ctypes import *
from threading import Event, Lock, Thread
import serial
try:
import fcntl
except ImportError:
fcntl = None
context_prec1 = decimal.Context(prec=1)
context_prec2 = decimal.Context(prec=2)
recv_start = (
("rssi", int),
("id", str),
("header", int),
("sequence", int),
("uptime", int),
)
recv_type_lut = {
1: {'type': 'beacon',
'items': (
("altitude", int),
("co2_conc", int),
("humidity", lambda x: decimal.Decimal(x, context_prec1)),
("illuminance", int),
("motion_count", int),
("orientation", int),
("press_count", int),
("pressure", int),
("sound_level", int),
("temperature", lambda x: decimal.Decimal(x, context_prec2)),
("voc_conc", int),
("voltage", lambda x: decimal.Decimal(x, context_prec2))
)},
2: {'type': 'sound',
'items': (
("min", int),
("max", int),
)}
}
items_v1_0_x = (
("rssi", int),
("id", str),
("sequence", int),
("altitude", int),
("co2-conc", int),
("humidity", lambda x: decimal.Decimal(x, context_prec1)),
("illuminance", int),
("motion-count", int),
("orientation", int),
("press-count", int),
("pressure", int),
("sound-level", int),
("temperature", lambda x: decimal.Decimal(x, context_prec2)),
("voc-conc", int),
("voltage", lambda x: decimal.Decimal(x, context_prec2))
)
class Gateway:
def __init__(self, device, separator):
self._ser = None
self._device = device
self.on_line = None
self.on_recv = None
self._command_mutex = Lock()
self._event = Event()
self._response = None
logging.info("Connecting on device %s", self._device)
self._ser = serial.Serial(self._device, baudrate=115200, timeout=3)
self._lock()
self._speed_up()
logging.info("Success connect on device %s", self._device)
self._ser.flush()
self._ser.reset_input_buffer()
self._ser.reset_output_buffer()
time.sleep(0.5)
self._ser.write(b'\x1b')
self.is_run = False
self._command('')
cgmr = self.get_cgmr()
self._old_recv = cgmr.startswith("1.0.") or cgmr.startswith("v1.0.")
logging.info("FW: %s", self.command('I')[0])
self._recv_type_lut = {}
for header in recv_type_lut:
items = []
for item in recv_type_lut[header]['items']:
items.append((item[0].replace('_', separator), item[1]))
self._recv_type_lut[header] = {
'type': recv_type_lut[header]['type'],
'items': tuple(items),
}
def __del__(self):
self._unlock()
try:
self._ser.close()
except Exception as e:
pass
self._ser = None
def run(self):
self.is_run = True
while self.is_run:
self._loop()
def _loop(self):
try:
line = self._ser.readline()
except serial.SerialException as e:
logging.error("SerialException %s", e)
self._ser.close()
raise
if line:
logging.debug("Read line %s", line)
line = line.decode().strip()
            if line.startswith('{'):
                return
            if line.startswith('#'):
                return
if self.on_line:
self.on_line(line)
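            # "$RECV: ..." lines carry a comma-separated record: the common head
            # fields (rssi, id, header, sequence, uptime) come first, and the
            # remaining fields depend on the `header` value as described by
            # recv_type_lut (firmware 1.0.x uses the flat items_v1_0_x layout
            # instead).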
if self.on_recv and line.startswith("$RECV:"):
payload = {}
values = line[7:].split(',')
if self._old_recv:
for i, item in enumerate(items_v1_0_x):
value = values[i]
payload[item[0]] = None if value == '' else item[1](value)
else:
for i, item in enumerate(recv_start):
value = values[i]
payload[item[0]] = None if value == '' else item[1](value)
recv_type = self._recv_type_lut.get(payload['header'], None)
if recv_type:
del payload['header']
payload['type'] = recv_type['type']
for i, item in enumerate(recv_type['items']):
value = values[i + 5]
payload[item[0]] = None if value == '' else item[1](value)
self.on_recv(payload)
elif self._response is not None:
if line == 'OK':
self._event.set()
elif line == 'ERROR':
self._response = None
self._event.set()
else:
self._response.append(line)
def _command(self, command):
with self._command_mutex:
logging.debug("Command AT%s", command)
self._event.clear()
command = 'AT' + command + '\r\n'
self._response = []
self._ser.write(command.encode('ascii'))
if self.is_run:
self._event.wait()
else:
while not self._event.is_set():
self._loop()
response = self._response
self._response = None
return response
def command(self, command, repeat=3):
for i in range(repeat):
response = self._command(command)
if response is None:
time.sleep(0.5)
continue
return response
raise Exception("Command %s not work." % command)
def get_cgsn(self):
response = self.command("+CGSN")
return response[0].split(':')[1].strip()
def get_cgmr(self):
response = self.command("+CGMR")
return response[0].split(':')[1].strip()
def start(self):
"""Run in thread"""
Thread(target=self.run, args=[]).start()
def _lock(self):
if not fcntl or not self._ser:
return
try:
fcntl.flock(self._ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except Exception as e:
raise Exception('Could not lock device %s' % self._device)
def _unlock(self):
if not fcntl or not self._ser:
return
fcntl.flock(self._ser.fileno(), fcntl.LOCK_UN)
def _speed_up(self):
if not fcntl:
return
if platform.system() != 'Linux':
return
TIOCGSERIAL = 0x0000541E
TIOCSSERIAL = 0x0000541F
ASYNC_LOW_LATENCY = 0x2000
class serial_struct(Structure):
_fields_ = [("type", c_int),
("line", c_int),
("port", c_uint),
("irq", c_int),
("flags", c_int),
("xmit_fifo_size", c_int),
("custom_divisor", c_int),
("baud_base", c_int),
("close_delay", c_ushort),
("io_type", c_byte),
("reserved_char", c_byte * 1),
("hub6", c_uint),
("closing_wait", c_ushort),
("closing_wait2", c_ushort),
("iomem_base", POINTER(c_ubyte)),
("iomem_reg_shift", c_ushort),
("port_high", c_int),
("iomap_base", c_ulong)]
buf = serial_struct()
try:
fcntl.ioctl(self._ser.fileno(), TIOCGSERIAL, buf)
buf.flags |= ASYNC_LOW_LATENCY
fcntl.ioctl(self._ser.fileno(), TIOCSSERIAL, buf)
except Exception as e:
pass
|
the-stack_106_30513 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 10:13:48 2019
@author: cong
"""
"""
#read json
import json
json_file = '/media/cong/娱乐/coco2017/annotations/image_info_test2017.json'
val=json.load(open(json_file, 'r'))
#
##
bb=[]
a=val['annotations']
for i in a:
b=i['category_id']
bb.append(b)
"""
#coding:utf-8
'''
Read annotations list
for ant in annotations_list:
if cat in cats:
get imname
if imnum.jpg in impath:
add object to imnum.xml
else:
copy imname.jpg as imnum.jpg to impath
make imnum.xml
add object to imnum.xml
TO DO: make txt files as well as xml
'''
import os
import json
import cv2
from lxml import etree
import xml.etree.cElementTree as ET
import time
id_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28,31, 32, 33,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67,
70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84,85, 86, 87, 88, 89, 90]
names_list = ['person','bicycle', 'car', 'motorbike', 'aeroplane', 'bus', 'train',
'truck', 'boat', 'trafficlight', 'firehydrant', 'stopsign', 'parkingmeter',
'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sportsball', 'kite', 'baseballbat', 'baseballglove', 'skateboard', 'surfboard', 'tennisracket',
'bottle', 'wineglass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hotdog', 'pizza', 'donut', 'cake', 'chair', 'sofa', 'pottedplant', 'bed', 'diningtable',
'toilet', 'tvmonitor', 'laptop', 'mouse', 'remote', 'keyboard', 'cellphone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
'book', 'clock', 'vase', 'scissors', 'teddybear', 'hairdrier', 'toothbrush']
im_ext = 'jpg'
COCO_images = '/media/student/coco/val2014'
Json_addr = '/media/student/coco/annotations/instances_val2014.json'
im_num = 0
ob_count = 0
im_pairs = dict()
main_dir = 'mAP/input'
if not os.path.isdir(main_dir):
os.mkdir(main_dir)
xml_dir = os.path.join(main_dir, 'ground-truth')
if not os.path.isdir(xml_dir):
os.mkdir(xml_dir)
im_dir = os.path.join(main_dir, 'images-optional')
if not os.path.isdir(im_dir):
os.mkdir(im_dir)
print('Reading JSON ...')
with open(Json_addr) as json_data:
annotation_list = json.load(json_data)
start_time = time.time()
print('--- Start Operation ---', start_time)
for i in range(0, len(annotation_list["annotations"])):
category_id = annotation_list["annotations"][i]["category_id"]
if category_id in id_list:
# print('HIT -->', im_num)
cat_name = names_list[id_list.index(category_id)]
im_id = (str(annotation_list["annotations"][i]["image_id"]))
xmin = int(annotation_list["annotations"][i]["bbox"][0])
ymin = int(annotation_list["annotations"][i]["bbox"][1])
xmax = int(xmin+annotation_list["annotations"][i]["bbox"][2])
ymax = int(ymin+annotation_list["annotations"][i]["bbox"][3])
z = '0'
        for sf in range((len(im_id)), 11):  # pad so that the image name is 12 digits long
z = z + "0"
im_name = z + im_id
if os.path.exists(os.path.join(im_dir, str(im_pairs.get(im_name, 'None')) + '.' + im_ext)):
# ---add object to imnum.xml---
# read the xml root
tree = ET.parse(os.path.join(xml_dir, str(im_pairs[im_name]) + '.xml'))
root = tree.getroot()
# Convert root to etree
xml_str = ET.tostring(root)
troot = etree.fromstring(xml_str) # etree object
# create new object element
ob = etree.Element('object')
etree.SubElement(ob, 'name').text = cat_name
etree.SubElement(ob, 'pose').text = 'Unspecified'
etree.SubElement(ob, 'truncated').text = '0'
etree.SubElement(ob, 'difficult').text = '0'
bbox = etree.SubElement(ob, 'bndbox')
etree.SubElement(bbox, 'xmin').text = str(xmin)
etree.SubElement(bbox, 'ymin').text = str(ymin)
etree.SubElement(bbox, 'xmax').text = str(xmax)
etree.SubElement(bbox, 'ymax').text = str(ymax)
# prettify the object
xml_str = etree.tostring(ob, pretty_print=True)
ob_pretty = etree.fromstring(xml_str)
# append etree object to etree root(troot)
troot.append(ob_pretty)
# overwrite the old xml
xml_str = etree.tostring(troot, pretty_print=True)
with open(os.path.join(xml_dir, str(im_pairs[im_name]) + '.xml'), 'wb') as output:
output.write(xml_str)
print('--- Added {} to '.format(cat_name), str(im_pairs[im_name]) + '.xml' ' ---')
else:
# Copy image as im_num.jpg
with open(os.path.join(COCO_images, 'COCO_val2014_'+im_name + '.' + im_ext), 'rb') as rf:
with open(os.path.join(im_dir, str(im_num) + '.' + im_ext), 'wb') as wf:
for line in rf:
wf.write(line)
# make imnum.xml
            # -get imsize (width, height, depth)
            # folder that contains the source images
im_cv2 = cv2.imread(os.path.join(COCO_images, 'COCO_val2014_'+im_name + '.' + im_ext))
height, width, depth = im_cv2.shape
if depth==2:
print(depth)
# Form the file
annotation = ET.Element('annotation')
ET.SubElement(annotation, 'folder').text = im_dir
ET.SubElement(annotation, 'filename').text = str(im_num) + '.' + im_ext
ET.SubElement(annotation, 'segmented').text = '0'
size = ET.SubElement(annotation, 'size')
ET.SubElement(size, 'width').text = str(width)
ET.SubElement(size, 'height').text = str(height)
ET.SubElement(size, 'depth').text = str(depth)
ob = ET.SubElement(annotation, 'object')
ET.SubElement(ob, 'name').text = cat_name
ET.SubElement(ob, 'pose').text = 'Unspecified'
ET.SubElement(ob, 'truncated').text = '0'
ET.SubElement(ob, 'difficult').text = '0'
bbox = ET.SubElement(ob, 'bndbox')
ET.SubElement(bbox, 'xmin').text = str(xmin)
ET.SubElement(bbox, 'ymin').text = str(ymin)
ET.SubElement(bbox, 'xmax').text = str(xmax)
ET.SubElement(bbox, 'ymax').text = str(ymax)
# Save the file
xml_str = ET.tostring(annotation)
root = etree.fromstring(xml_str)
xml_str = etree.tostring(root, pretty_print=True) # Entire content of the xml
save_path = os.path.join(xml_dir, str(im_num) + '.' + 'xml') # Create save path with imnum.xml
with open(save_path, 'wb') as temp_xml:
temp_xml.write(xml_str)
# keep record of which xml is paired with which image from coco_Set
im_pairs[im_name] = im_num
print('Copied imfile--> {} --- Object count--> {}'.format(str(im_num) + '.' + im_ext, ob_count))
im_num += 1
ob_count += 1
print('Finished with {} objects in {} images in {} seconds'.format(ob_count, im_num, time.time() - start_time))
|
the-stack_106_30514 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import flask
from waitress import serve
import plotly.graph_objects as go
from trackerApp.make_graphs import make_timeseries, make_cluster_hist, make_time_hist
from trackerApp.statistical_params import (
most_recent_seizure,
get_clusters,
get_cluster_info,
get_intervals,
likelihood_of_seizure,
estimate_cluster_size,
)
from trackerApp.inout import get_data
from trackerApp.constants import SEIZURE_SHEET
import felling
felling.configure()
import logging
logger = logging.getLogger(__name__)
server = flask.Flask(__name__)
app = dash.Dash(__name__, server=server)
df = get_data(SEIZURE_SHEET)
clusters = get_clusters(df)
cluster_info = get_cluster_info(clusters)
intervals = get_intervals(cluster_info)
days_since = most_recent_seizure(df)
likelihood, next_updates, next_likelihood = likelihood_of_seizure(days_since, intervals)
next_cluster_size = estimate_cluster_size(cluster_info, days_since)
if days_since >= 2:
likelihood_message = f"""Making the current likelihood of a seizure **{likelihood}%**, this will update to {next_likelihood}% in {next_updates} days."""
if isinstance(likelihood, str):
likelihood_message = f"""Making the current likelihood of a seizure **{likelihood}**, this will update to {next_likelihood}% in {next_updates} days."""
elif days_since == 1:
likelihood_message = f"""As the most recent seizure was only {days_since} day ago, it is possible the cluster is still active"""
elif days_since == 0:
likelihood_message = f"""As the most recent seizure was today, it is possible the cluster is still active"""
else:
likelihood_message = "Failed to produce likelihood message."
app.title = "Seizure Tracker"
app.layout = html.Div(
[
html.H1(
children="Seizure Tracker",
style={
"textAlign": "center",
},
),
html.Div(
dcc.Markdown(f"""The last seizure was **{days_since}** days ago"""),
style={
"textAlign": "center",
},
),
html.Div(
dcc.Markdown(likelihood_message),
style={
"textAlign": "center",
},
),
html.Div(
dcc.Markdown(next_cluster_size),
style={
"textAlign": "center",
},
),
html.Div(
[
dcc.RadioItems(
id="graph-type",
options=[
{"label": "Clusters over time", "value": "bars_timeseries"},
{
"label": "Time since last cluster",
"value": "bars_time_comparison",
},
{
"label": "Hour of the day seizures have occurred",
"value": "seizure_hour_comparison",
},
],
value="bars_timeseries",
labelStyle={"display": "inline-block"},
persistence=False,
),
]
),
dcc.Graph(id="bono-seizures", config={"responsive": "auto"}),
]
)
import gc
del df
del intervals
del cluster_info
gc.collect()
@app.callback(
Output(component_id="bono-seizures", component_property="figure"),
[Input(component_id="graph-type", component_property="value")],
)
def update_fig(fig_type: str) -> go.Figure:
"""
Based upon the radio buttons, present the correct fig
Parameters
----------
fig_type : str
The radio button selected
Returns
-------
go.Figure
The appropriate figure
"""
df = get_data(SEIZURE_SHEET)
if fig_type == "seizure_hour_comparison":
fig = make_time_hist(df)
elif fig_type in ["bars_time_comparison", "bars_timeseries"]:
clusters = get_clusters(df)
cluster_info = get_cluster_info(clusters)
if fig_type == "bars_time_comparison":
intervals = get_intervals(cluster_info)
fig = make_cluster_hist(intervals)
elif fig_type == "bars_timeseries":
fig = make_timeseries(cluster_info)
try:
del df
except:
pass
try:
del intervals
except:
pass
try:
del cluster_info
except:
pass
gc.collect()
return fig
application = app.server
logger.info("Ready to serve")
if __name__ == "__main__":
logger.info("Serving app")
serve(application, port=8080, url_scheme="https")
|
the-stack_106_30515 | class Solution:
def waysToMakeFair(self, nums: List[int]) -> int:
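        # Prefix sums of the values at even and odd indices; after removing
        # nums[i], elements to the right switch parity, so the remaining array
        # is fair when prefix_even + suffix_odd == prefix_odd + suffix_even.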
n = len(nums)
even_sum = [0] * n
odd_sum = [0] * n
even_sum[0] = nums[0]
for i in range(1, n):
if i%2 ==0:
even_sum[i] = even_sum[i-1] + nums[i]
odd_sum[i] = odd_sum[i-1]
else:
odd_sum[i] = odd_sum[i-1] + nums[i]
even_sum[i] = even_sum[i-1]
prefix_e, prefix_o = 0, 0
ans = 0
for i in range(n):
suffix_e = even_sum[n-1] - even_sum[i]
suffix_o = odd_sum[n-1] - odd_sum[i]
ans += (prefix_e + suffix_o == prefix_o + suffix_e)
prefix_e = even_sum[i]
prefix_o = odd_sum[i]
return ans
|
the-stack_106_30516 | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates an experiment.
The created experiment is using a query percentage of 10, which defines what
fraction of auctions should go to the control split (90%) vs. the experiment
split (10%), then adds experimental bid changes for an ad group, and adds an
experiment-only keyword. To get campaigns, run get_campaigns.py. To get ad
groups, run get_ad_groups.py. To get keywords, run get_keywords.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import datetime
import uuid
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, campaign_id, ad_group_id):
# Initialize appropriate service.
experiment_service = client.GetService('ExperimentService', version='v201601')
ad_group_service = client.GetService('AdGroupService', version='v201601')
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201601')
# Construct operations and add experiment.
tomorrow = datetime.datetime.now() + datetime.timedelta(1)
thirty_days = datetime.datetime.now() + datetime.timedelta(30)
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Interplanetary Experiment #%s' % uuid.uuid4(),
'queryPercentage': '10',
'startDateTime': tomorrow.strftime('%Y%m%d %H%M%S'),
# Optional fields.
'status': 'ENABLED',
'endDateTime': thirty_days.strftime('%Y%m%d %H%M%S')
}
}]
result = experiment_service.mutate(operations)
# Display results.
for experiment in result['value']:
print ('Experiment with name \'%s\' and id \'%s\' was added.'
% (experiment['name'], experiment['id']))
# Construct operations and update ad group.
operations = [{
'operator': 'SET',
'operand': {
'id': ad_group_id,
'experimentData': {
'xsi_type': 'AdGroupExperimentData',
'experimentId': experiment['id'],
'experimentDeltaStatus': 'MODIFIED',
'experimentBidMultipliers': {
'xsi_type': 'ManualCPCAdGroupExperimentBidMultipliers',
'maxCpcMultiplier': {
'multiplier': '0.5'
}
}
}
}
}]
result = ad_group_service.mutate(operations)
# Display results.
for ad_group in result['value']:
print ('Ad group with name \'%s\' and id \'%s\' was updated in the '
'experiment.' % (ad_group['name'], ad_group['id']))
  # Construct operations and add ad group criterion.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group['id'],
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
},
'experimentData': {
'xsi_type': 'BiddableAdGroupCriterionExperimentData',
'experimentId': experiment['id'],
'experimentDeltaStatus': 'EXPERIMENT_ONLY'
}
}
}]
result = ad_group_criterion_service.mutate(operations)
# Display results.
for criterion in result['value']:
print ('Ad group criterion with ad group id \'%s\' and criterion '
'id \'%s\' was added to the experiment.'
% (criterion['adGroupId'], criterion['criterion']['id']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID, AD_GROUP_ID)
|
the-stack_106_30517 | import numpy as np
import pandas as pd
import json
def build_df(fn):
json_data=open(fn).read()
js_obj = json.loads(json_data)
data = np.array(js_obj['data'])
df = pd.DataFrame(js_obj['data'], columns=js_obj['column_names'], index=data[:,0])
return df
def augment_financials(df):
df['Swing'] = df['High'] - df['Low']
df['return'] = (df['Adjusted Close'] / df['Adjusted Close'].shift(1)) - 1
if __name__ == '__main__':
fn_sp = 'INDEX_SP500.json'
fn_dow = 'INDEX_DOW.json'
fn_nasdaq = 'INDEX_NASDAQ.json'
sp = build_df(fn_sp)
augment_financials(sp)
dow = build_df(fn_dow)
augment_financials(dow)
ns = build_df(fn_nasdaq)
augment_financials(ns)
#build a new dataframe with:
#average percent change of all three indexes
#some kind of measure of how closely the indexes agree... pct diff between highest and lowest?
#average difference (percent) between high and low for the day of all three indexes
df = pd.DataFrame(index = sp.index)
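    # Minimal sketch of how the frame described above could be filled in; the
    # helper name `returns` and the column names 'avg_return', 'index_spread'
    # and 'avg_swing' are illustrative assumptions, not an established schema.
    returns = pd.concat([sp['return'], dow['return'], ns['return']], axis=1)
    df['avg_return'] = returns.mean(axis=1)
    # spread between the best- and worst-performing index as a rough agreement measure
    df['index_spread'] = returns.max(axis=1) - returns.min(axis=1)
    df['avg_swing'] = pd.concat(
        [sp['Swing'] / sp['Low'], dow['Swing'] / dow['Low'], ns['Swing'] / ns['Low']],
        axis=1).mean(axis=1)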
#then tack on the results of what music they played (target). |
the-stack_106_30518 | import argparse
import computeIDTF, os, subprocess, ThreadPool
import classify_library
"""
Uses multi-threading to extract IDTFs and compute the Fisher Vectors (FVs) for
each of the videos in the input list (vid_in). The Fisher Vectors are output
in the output_dir
"""
# This is the function that each worker will compute.
def processVideo(vid,vid_path,output_dir,gmm_list):
"""
gmm_list is the file of the saved list of GMMs
"""
videoLocation = os.path.join(vid_path,vid)
outputName = os.path.join(output_dir, vid.split('.')[0]+".fisher")
computeIDTF.extractFV(videoLocation, outputName, gmm_list)
#python computeFVs.py videos vid_in vid_out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("vid_path", help="Directory of the input videos", type=str)
parser.add_argument("vid_in", help="list of input videos in .txt file", type=str)
parser.add_argument("output_dir", help="output directory to save FVs (.fisher files)", type=str)
parser.add_argument("gmm_list", help="File of saved list of GMMs", type=str)
args = parser.parse_args()
f = open(args.vid_in, 'r')
input_videos = f.readlines()
f.close()
input_videos = [line.split()[0].split('/')[1] for line in [video.rstrip() for video in input_videos]]
###Just to prevent overwriting already processed vids
completed_vids = [filename.split('.')[0] for filename in os.listdir(args.output_dir) if filename.endswith('.npz')]
overlap = [vid for vid in input_videos if vid.split('.')[0] in completed_vids]
#Multi-threaded FV construction.
numThreads = 2
pool = ThreadPool.ThreadPool(numThreads)
for vid in input_videos:
if vid not in overlap:
pool.add_task(processVideo,vid,args.vid_path,args.output_dir,args.gmm_list)
pool.wait_completion()
|
the-stack_106_30520 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestIterator(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.iterator import Iterator
return Iterator
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
iterator = self._makeOne(client, PATH)
self.assertTrue(iterator.client is client)
self.assertEqual(iterator.path, PATH)
self.assertEqual(iterator.page_number, 0)
self.assertEqual(iterator.next_page_token, None)
def test___iter__(self):
PATH = '/foo'
KEY1 = 'key1'
KEY2 = 'key2'
ITEM1, ITEM2 = object(), object()
ITEMS = {KEY1: ITEM1, KEY2: ITEM2}
def _get_items(response):
for item in response.get('items', []):
yield ITEMS[item['name']]
connection = _Connection({'items': [{'name': KEY1}, {'name': KEY2}]})
client = _Client(connection)
iterator = self._makeOne(client, PATH)
iterator.get_items_from_response = _get_items
self.assertEqual(list(iterator), [ITEM1, ITEM2])
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], PATH)
self.assertEqual(kw['query_params'], {})
def test_has_next_page_new(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
iterator = self._makeOne(client, PATH)
self.assertTrue(iterator.has_next_page())
def test_has_next_page_w_number_no_token(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
iterator = self._makeOne(client, PATH)
iterator.page_number = 1
self.assertFalse(iterator.has_next_page())
def test_has_next_page_w_number_w_token(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
TOKEN = 'token'
iterator = self._makeOne(client, PATH)
iterator.page_number = 1
iterator.next_page_token = TOKEN
self.assertTrue(iterator.has_next_page())
def test_get_query_params_no_token(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
iterator = self._makeOne(client, PATH)
self.assertEqual(iterator.get_query_params(), {})
def test_get_query_params_w_token(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
TOKEN = 'token'
iterator = self._makeOne(client, PATH)
iterator.next_page_token = TOKEN
self.assertEqual(iterator.get_query_params(),
{'pageToken': TOKEN})
def test_get_query_params_extra_params(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
extra_params = {'key': 'val'}
iterator = self._makeOne(client, PATH, extra_params=extra_params)
self.assertEqual(iterator.get_query_params(), extra_params)
def test_get_query_params_w_token_and_extra_params(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
TOKEN = 'token'
extra_params = {'key': 'val'}
iterator = self._makeOne(client, PATH, extra_params=extra_params)
iterator.next_page_token = TOKEN
expected_query = extra_params.copy()
expected_query.update({'pageToken': TOKEN})
self.assertEqual(iterator.get_query_params(), expected_query)
def test_get_query_params_w_token_collision(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
extra_params = {'pageToken': 'val'}
self.assertRaises(ValueError, self._makeOne, client, PATH,
extra_params=extra_params)
def test_get_next_page_response_new_no_token_in_response(self):
PATH = '/foo'
TOKEN = 'token'
KEY1 = 'key1'
KEY2 = 'key2'
connection = _Connection({'items': [{'name': KEY1}, {'name': KEY2}],
'nextPageToken': TOKEN})
client = _Client(connection)
iterator = self._makeOne(client, PATH)
response = iterator.get_next_page_response()
self.assertEqual(response['items'], [{'name': KEY1}, {'name': KEY2}])
self.assertEqual(iterator.page_number, 1)
self.assertEqual(iterator.next_page_token, TOKEN)
kw, = connection._requested
self.assertEqual(kw['method'], 'GET')
self.assertEqual(kw['path'], PATH)
self.assertEqual(kw['query_params'], {})
def test_get_next_page_response_no_token(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
iterator = self._makeOne(client, PATH)
iterator.page_number = 1
self.assertRaises(RuntimeError, iterator.get_next_page_response)
def test_reset(self):
connection = _Connection()
client = _Client(connection)
PATH = '/foo'
TOKEN = 'token'
iterator = self._makeOne(client, PATH)
iterator.page_number = 1
iterator.next_page_token = TOKEN
iterator.reset()
self.assertEqual(iterator.page_number, 0)
self.assertEqual(iterator.next_page_token, None)
def test_get_items_from_response_raises_NotImplementedError(self):
PATH = '/foo'
connection = _Connection()
client = _Client(connection)
iterator = self._makeOne(client, PATH)
self.assertRaises(NotImplementedError,
iterator.get_items_from_response, object())
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _Client(object):
def __init__(self, connection):
self.connection = connection
|
the-stack_106_30523 |
from .python_rsakey import Python_RSAKey
from .python_ecdsakey import Python_ECDSAKey
from .python_dsakey import Python_DSAKey
from .pem import dePem, pemSniff
from .asn1parser import ASN1Parser
from .cryptomath import bytesToNumber
from .compat import compatHMAC
from ecdsa.curves import NIST256p, NIST384p, NIST521p
from ecdsa.keys import SigningKey, VerifyingKey
class Python_Key(object):
"""
Generic methods for parsing private keys from files.
Handles both RSA and ECDSA keys, irrespective of file format.
"""
@staticmethod
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return Python_Key._parse_pkcs8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return Python_Key._parse_ssleay(bytes, "rsa")
elif pemSniff(s, "DSA PRIVATE KEY"):
bytes = dePem(s, "DSA PRIVATE KEY")
return Python_Key._parse_dsa_ssleay(bytes)
elif pemSniff(s, "EC PRIVATE KEY"):
bytes = dePem(s, "EC PRIVATE KEY")
return Python_Key._parse_ecc_ssleay(bytes)
elif pemSniff(s, "PUBLIC KEY"):
bytes = dePem(s, "PUBLIC KEY")
return Python_Key._parse_public_key(bytes)
else:
raise SyntaxError("Not a PEM private key file")
@staticmethod
def _parse_public_key(bytes):
# public keys are encoded as the subject_public_key_info objects
spk_info = ASN1Parser(bytes)
# first element of the SEQUENCE is the AlgorithmIdentifier
alg_id = spk_info.getChild(0)
# AlgId has two elements, the OID of the algorithm and parameters
# parameters generally have to be NULL, with exception of RSA-PSS
alg_oid = alg_id.getChild(0)
if list(alg_oid.value) != [42, 134, 72, 134, 247, 13, 1, 1, 1]:
raise SyntaxError("Only RSA Public keys supported")
subject_public_key = ASN1Parser(
ASN1Parser(spk_info.getChildBytes(1)).value[1:])
modulus = subject_public_key.getChild(0)
exponent = subject_public_key.getChild(1)
n = bytesToNumber(modulus.value)
e = bytesToNumber(exponent.value)
return Python_RSAKey(n, e, key_type="rsa")
@staticmethod
def _parse_pkcs8(bytes):
parser = ASN1Parser(bytes)
# first element in PrivateKeyInfo is an INTEGER
version = parser.getChild(0).value
if bytesToNumber(version) != 0:
raise SyntaxError("Unrecognized PKCS8 version")
# second element in PrivateKeyInfo is a SEQUENCE of type
# AlgorithmIdentifier
alg_ident = parser.getChild(1)
seq_len = alg_ident.getChildCount()
# first item of AlgorithmIdentifier is an OBJECT (OID)
oid = alg_ident.getChild(0)
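        # The byte lists below are the DER-encoded algorithm OIDs, e.g.
        # [42, 134, 72, 134, 247, 13, 1, 1, 1] is 1.2.840.113549.1.1.1
        # (rsaEncryption) and [42, 134, 72, 206, 61, 2, 1] is
        # 1.2.840.10045.2.1 (id-ecPublicKey).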
if list(oid.value) == [42, 134, 72, 134, 247, 13, 1, 1, 1]:
key_type = "rsa"
elif list(oid.value) == [42, 134, 72, 134, 247, 13, 1, 1, 10]:
key_type = "rsa-pss"
elif list(oid.value) == [42, 134, 72, 206, 56, 4, 1]:
key_type = "dsa"
elif list(oid.value) == [42, 134, 72, 206, 61, 2, 1]:
key_type = "ecdsa"
else:
raise SyntaxError("Unrecognized AlgorithmIdentifier: {0}"
.format(list(oid.value)))
# second item of AlgorithmIdentifier are parameters (defined by
# above algorithm)
if key_type == "rsa":
if seq_len != 2:
raise SyntaxError("Missing parameters for RSA algorithm ID")
parameters = alg_ident.getChild(1)
if parameters.value != bytearray(0):
raise SyntaxError("RSA parameters are not NULL")
if key_type == "dsa":
if seq_len != 2:
raise SyntaxError("Invalid encoding of algorithm identifier")
parameters = alg_ident.getChild(1)
if parameters.value == bytearray(0):
parameters = None
elif key_type == "ecdsa":
if seq_len != 2:
raise SyntaxError("Invalid encoding of algorithm identifier")
curveID = alg_ident.getChild(1)
if list(curveID.value) == [42, 134, 72, 206, 61, 3, 1, 7]:
curve = NIST256p
elif list(curveID.value) == [43, 129, 4, 0, 34]:
curve = NIST384p
elif list(curveID.value) == [43, 129, 4, 0, 35]:
curve = NIST521p
else:
raise SyntaxError("Unknown curve")
else: # rsa-pss
pass # ignore parameters - don't apply restrictions
if seq_len > 2:
raise SyntaxError("Invalid encoding of AlgorithmIdentifier")
#Get the privateKey
private_key_parser = parser.getChild(2)
#Adjust for OCTET STRING encapsulation
private_key_parser = ASN1Parser(private_key_parser.value)
if key_type == "ecdsa":
return Python_Key._parse_ecdsa_private_key(private_key_parser,
curve)
elif key_type == "dsa":
return Python_Key._parse_dsa_private_key(private_key_parser, parameters)
else:
return Python_Key._parse_asn1_private_key(private_key_parser,
key_type)
@staticmethod
def _parse_ssleay(data, key_type="rsa"):
"""
Parse binary structure of the old SSLeay file format used by OpenSSL.
For RSA keys.
"""
private_key_parser = ASN1Parser(data)
# "rsa" type as old format doesn't support rsa-pss parameters
return Python_Key._parse_asn1_private_key(private_key_parser, key_type)
@staticmethod
def _parse_dsa_ssleay(data):
"""
Parse binary structure of the old SSLeay file format used by OpenSSL.
For DSA keys.
"""
private_key_parser = ASN1Parser(data)
return Python_Key._parse_dsa_private_key(private_key_parser)
@staticmethod
def _parse_ecc_ssleay(data):
"""
Parse binary structure of the old SSLeay file format used by OpenSSL.
For ECDSA keys.
"""
private_key = SigningKey.from_der(compatHMAC(data))
secret_mult = private_key.privkey.secret_multiplier
return Python_ECDSAKey(None, None, private_key.curve.name,
secret_mult)
@staticmethod
def _parse_ecdsa_private_key(private, curve):
ver = private.getChild(0)
if ver.value != b'\x01':
raise SyntaxError("Unexpected EC key version")
private_key = private.getChild(1)
public_key = private.getChild(2)
# first two bytes are the ASN.1 custom type and the length of payload
# while the latter two bytes are just specification of the public
# key encoding (uncompressed)
# TODO: update ecdsa lib to be able to parse PKCS#8 files
if curve is not NIST521p:
if list(public_key.value[:1]) != [3] or \
list(public_key.value[2:4]) != [0, 4]:
raise SyntaxError("Invalid or unsupported encoding of public key")
pub_key = VerifyingKey.from_string(
compatHMAC(public_key.value[4:]),
curve)
else:
if list(public_key.value[:3]) != [3, 129, 134] or \
list(public_key.value[3:5]) != [0, 4]:
raise SyntaxError("Invalid or unsupported encoding of public key")
pub_key = VerifyingKey.from_string(
compatHMAC(public_key.value[5:]),
curve)
pub_x = pub_key.pubkey.point.x()
pub_y = pub_key.pubkey.point.y()
priv_key = SigningKey.from_string(compatHMAC(private_key.value),
curve)
mult = priv_key.privkey.secret_multiplier
return Python_ECDSAKey(pub_x, pub_y, curve.name, mult)
@staticmethod
def _parse_asn1_private_key(private_key_parser, key_type):
version = private_key_parser.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(private_key_parser.getChild(1).value)
e = bytesToNumber(private_key_parser.getChild(2).value)
d = bytesToNumber(private_key_parser.getChild(3).value)
p = bytesToNumber(private_key_parser.getChild(4).value)
q = bytesToNumber(private_key_parser.getChild(5).value)
dP = bytesToNumber(private_key_parser.getChild(6).value)
dQ = bytesToNumber(private_key_parser.getChild(7).value)
qInv = bytesToNumber(private_key_parser.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv, key_type)
@staticmethod
def _parse_dsa_private_key(private_key_parser, domain_parameters=None):
if domain_parameters:
p = bytesToNumber(domain_parameters.getChild(0).value)
q = bytesToNumber(domain_parameters.getChild(1).value)
g = bytesToNumber(domain_parameters.getChild(2).value)
x = bytesToNumber(private_key_parser.value)
return Python_DSAKey(p, q, g, x)
p = bytesToNumber(private_key_parser.getChild(1).value)
q = bytesToNumber(private_key_parser.getChild(2).value)
g = bytesToNumber(private_key_parser.getChild(3).value)
y = bytesToNumber(private_key_parser.getChild(4).value)
x = bytesToNumber(private_key_parser.getChild(5).value)
return Python_DSAKey(p, q, g, x, y)
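
# Example usage (sketch; the file name and variable names are illustrative):
#
#     with open("server-key.pem", "rt") as key_file:
#         key = Python_Key.parsePEM(key_file.read())
#     # depending on the PEM header, `key` is a Python_RSAKey, Python_ECDSAKey
#     # or Python_DSAKey instance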
|
the-stack_106_30524 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 12:25:06 2019
@author: abhij
"""
import numpy as num
import matplotlib.pyplot as plt
#_______________________________________________________________________________________________________________________#
# Function to find the intersection of two circles, returns an array with two arrays having x and y
def circle_intersection(f1 = 0, f2 = 0, o1 = 0, o2 = 0, l1 = 0, l2 = 0):
    x_cor = f1 - o1
    y_cor = f2 - o2
    R = (x_cor**2 + y_cor**2)**0.5  # distance between the two circle centres
rot1 = (l1**2 - l2**2 + R**2)/(2*R)
rot2 = (l1**2 - rot1**2)**0.5
x = ((rot1/R) * (x_cor)) + ((rot2/R) * (y_cor)) + o1
y = ((rot1/R) * (y_cor)) - ((rot2/R) * (x_cor)) + o2
value1 = []
value1.append(x)
value1.append(y)
x = ((rot1/R) * (x_cor)) - ((rot2/R) * (y_cor)) + o1
y = ((rot1/R) * (y_cor)) + ((rot2/R) * (x_cor)) + o2
value2 = []
value2.append(x)
value2.append(y)
values = []
values.append(value1)
values.append(value2)
return values
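# Quick sanity check (illustrative): two circles of radius 2 centred at (0, 0)
# and (2, 0) intersect at (1, -sqrt(3)) and (1, +sqrt(3)):
#
#     circle_intersection(2, 0, 0, 0, 2, 2)  # -> [[1.0, -1.732...], [1.0, 1.732...]]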
#_______________________________________________________________________________________________________________________#
# Function to create a line
def generate_line(point_1 , point_2 ):
if (point_2[0] - point_1[0]) != 0:
slope = (point_2[1] - point_1[1])/(point_2[0] - point_1[0])
else:
slope = 1000000 #effectively infinity
constant = -1*slope*point_1[0] + point_1[1]
line = []
line.append(slope)
line.append(constant)
return line
#_______________________________________________________________________________________________________________________#
# Function to find angle between two lines
def ang_between_lines(line1, line2):
return(num.arctan((line1[0] - line2[0])/(1 + (line1[0]*line2[0]))))
#_______________________________________________________________________________________________________________________#
# Function to find the angle of the servos
def extension(X_cord, Y_cord, target_X, target_Y ,f_length, b_length):
values = circle_intersection(target_X, target_Y, 0, 0, f_length, b_length)
optimal_point = []
min_distance = 10000000
print("points")
for value in values:
print(value)
distance = (((value[0] - X_cord)**2)+((value[1] - Y_cord)**2))**0.5
        if distance <= min_distance:
            min_distance = distance
            optimal_point = value
print("optimal point:\n", optimal_point)
origin = [0,0]
target_point = [target_X, target_Y]
plt.scatter(0,0, color = "white") #to define the boundaries of the graph
plt.scatter(25,25, color = "white")
plt.scatter(0,25, color = "white")
plt.scatter(25,0, color = "white")
x_values_bicep = [origin[0],optimal_point[0]]
y_values_bicep = [origin[1],optimal_point[1]]
plt.plot(x_values_bicep,y_values_bicep, color = "black", linewidth = 5)
x_values_forearm = [optimal_point[0],target_point[0]]
y_values_forearm = [optimal_point[1],target_point[1]]
plt.plot(x_values_forearm,y_values_forearm, color = "black", linewidth = 5)
plt.scatter(origin[0],origin[1], color = "black", s = 250, marker = "s")
plt.scatter(target_point[0],target_point[1], color = "red", s = 200, marker = 9)
plt.scatter( optimal_point[0], optimal_point[1], color = "red", s= 100, marker = "s")
plt.show()
bicep_line = generate_line(origin, optimal_point)
forearm_line = generate_line(optimal_point, target_point)
x_axis = [0,0]
angles = []
shoulder_angle = ang_between_lines(x_axis,bicep_line)
elbow_angle = ang_between_lines(forearm_line, bicep_line)
angles.append(shoulder_angle)
angles.append(elbow_angle)
return(angles)
#_______________________________________________________________________________________________________________________#
# Main Function
def Inverse_kinematics(final_x, final_y, final_z, init_x, init_y, init_z):
    check_bounds = ((final_x**2) + (final_y**2) + (final_z**2))**0.5  # distance from base to target
check_floor = bool(final_z > 0)
f_length = 9.4
b_length = 6
base_size = abs(f_length - b_length)
    reach = f_length + b_length
if check_bounds > base_size and check_bounds < reach and check_floor and final_y >= 0:
base_x = 0
base_y = 0
base_z = 0
angles = []
R_init = (((init_x - base_x)**2)+((init_y - base_y)**2))**0.5
R_fin = (((final_x - base_x)**2)+((final_y - base_y)**2))**0.5
theta = num.arctan((final_y - base_y)/(final_x - base_x))
Z_init = init_z - base_z
Z_fin = final_z - base_z
angles = extension(R_init, Z_init, R_fin, Z_fin, b_length, f_length)
servo_angle_1 = (theta/3.14)*180
if servo_angle_1 < 0:
servo_angle_1 = 180 + servo_angle_1
else:
servo_angle_1 = servo_angle_1
servo_angle_2 = (angles[0]/3.14)*180
if servo_angle_2 < 0:
servo_angle_2 = 180 + servo_angle_2
else:
servo_angle_2 = servo_angle_2
servo_angle_2 = 180 - servo_angle_2
servo_angle_3 = (angles[1]/3.14)*180
if servo_angle_3 < 0:
servo_angle_3 = 180 + servo_angle_3
else:
servo_angle_3 = servo_angle_3
print("servo 1's angle is", servo_angle_1)
print("servo 2's angle is", servo_angle_2)
print("servo 3's angle is", servo_angle_3)
elif check_bounds < base_size:
print("Action terminated, collision with base")
elif check_bounds > reach or final_y < 0:
print("Action terminated, not enough reach")
elif check_floor == 0:
print("Action terminated, collision with floor")
else:
print("Unknown error, please terminate the program")
Inverse_kinematics(5,5,5,5,5,5)
|
the-stack_106_30525 | #!/usr/bin/env python3
# coding=utf-8
# Copyright Matus Chochlik.
# Distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
import os
import sys
# globally enables/disables the "dry-run" mode
dry_run = False
# returns a normalized path to the project root directory
def get_root_dir():
return os.path.normpath(
os.path.join(
os.path.dirname(sys.argv[0]),
os.path.pardir
)
)
# returns the path of the VERSION file
def version_file(root_dir = get_root_dir()):
return os.path.join(root_dir, "VERSION")
# returns the current version numbers in a list
def current_version(root_dir = get_root_dir()):
with open(version_file(root_dir), "rt") as src:
return map(int, src.read().strip().split("."))
# returns the current version string
def version_string(version_numbers):
return "%d.%d.%d" % tuple(version_numbers)
def bumped_version_number(version_numbers, idx):
return [n if i < idx else n+1 if i== idx else 0 for i, n in enumerate(version_numbers)]
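# e.g. bumped_version_number([1, 2, 3], 1) -> [1, 3, 0]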
def next_release(root_dir = get_root_dir()):
return bumped_version_number(current_version(root_dir), 1)
def next_hotfix(root_dir = get_root_dir()):
return bumped_version_number(current_version(root_dir), 2)
# writes contents into a file
def write_file(file_path, contents, simulate=None):
if simulate is None: simulate = dry_run
if simulate:
print("echo '-n' '"+contents+"' > "+file_path)
else:
with open(file_path, "wt") as dst:
dst.write(contents)
# executes a command in a subprocess
def execute_command(cmd_line, root_dir=get_root_dir(), simulate=None):
import subprocess
if simulate is None: simulate = dry_run
if simulate:
print(cmd_line[0]+" '"+str("' '").join(cmd_line[1:])+"'")
return str()
else:
proc = subprocess.Popen(
cmd_line,
cwd=root_dir,
stdout=subprocess.PIPE
)
out, err = proc.communicate()
return out.decode('utf-8')
# executes a git command
def git_command(parameters, root_dir=get_root_dir(), simulate=None):
return execute_command(["git"]+parameters, root_dir, simulate)
# returns the name of the current git branch
def git_current_branch(root_dir=get_root_dir()):
return git_command(
["rev-parse", "--abbrev-ref", "HEAD"],
root_dir,
False
).strip()
# returns true if the specified branch exists
def git_has_branch(branch_name, root_dir=get_root_dir()):
return bool(
git_command(
["branch", "--list", branch_name],
root_dir,
False
).strip()
)
# returns true if the specified remote branch exists
def git_has_remote_branch(branch_name, root_dir=get_root_dir()):
git_command(["fetch", "origin"], root_dir, False)
return bool(
git_command(
["branch", "--list", branch_name],
root_dir,
False
).strip()
)
# Begins a new release
def action_begin_release():
root_dir = get_root_dir()
git_command(["checkout", "develop"], root_dir)
git_command(["pull", "origin", "develop"], root_dir)
next_version = version_string(next_release(root_dir))
git_command(["checkout", "-b", "release-"+next_version, "develop"], root_dir)
write_file(version_file(root_dir), next_version)
git_command(["add", version_file(root_dir)], root_dir)
git_command(["commit", "-m", "Started release-"+next_version], root_dir)
git_command(["push", "origin", "release-"+next_version], root_dir)
# Finishes a release
def action_finish_release():
root_dir = get_root_dir()
current_branch = git_current_branch(root_dir)
release_version = version_string(current_version(root_dir))
release_branch = "release-"+release_version
if current_branch != release_branch:
release_version = version_string(next_release(root_dir))
release_branch = "release-"+release_version
if git_has_branch(release_branch, root_dir):
git_command(["checkout", release_branch], root_dir)
git_command(["pull", "origin", release_branch], root_dir)
elif git_has_remote_branch(release_branch, root_dir):
git_command(["checkout", "-b", release_branch, "origin/"+release_branch], root_dir)
git_command(["pull", "origin", release_branch], root_dir)
else: raise RuntimeError(
"Release branch '"+release_branch+"' does not exist. "
"Re-run with --begin-release to start a new release."
)
git_command(["checkout", "main"], root_dir)
git_command(["pull", "origin", "main"], root_dir)
git_command(["merge", "-X", "theirs", "--no-ff", release_branch], root_dir)
git_command(["tag", "-a", release_version, "-m", "Tagged release "+release_version], root_dir)
git_command(["checkout", "develop"], root_dir)
git_command(["pull", "origin", "develop"], root_dir)
git_command(["merge", "--no-ff", release_branch], root_dir)
git_command(["branch", "-D", release_branch], root_dir)
git_command(["push", "origin", ":"+release_branch], root_dir)
git_command(["push", "origin", release_version], root_dir)
git_command(["push", "origin", "main"], root_dir)
git_command(["push", "origin", "develop"], root_dir)
# Begins a new hotfix
def action_begin_hotfix():
root_dir = get_root_dir()
git_command(["checkout", "main"], root_dir)
git_command(["pull", "origin", "main"], root_dir)
base_version = version_string(current_version(root_dir))
next_version = version_string(next_hotfix(root_dir))
git_command(["checkout", "-b", "hotfix-"+next_version, base_version+"^2"], root_dir)
write_file(version_file(root_dir), next_version)
git_command(["add", version_file(root_dir)], root_dir)
git_command(["commit", "-m", "Started hotfix-"+next_version])
git_command(["push", "origin", "hotfix-"+next_version], root_dir)
# Finishes a hotfix
def action_finish_hotfix():
root_dir = get_root_dir()
current_branch = git_current_branch(root_dir)
hotfix_version = version_string(current_version(root_dir))
hotfix_branch = "hotfix-"+hotfix_version
if current_branch != hotfix_branch:
hotfix_version = version_string(next_hotfix(root_dir))
hotfix_branch = "hotfix-"+hotfix_version
if git_has_branch(hotfix_branch, root_dir):
git_command(["checkout", hotfix_branch], root_dir)
git_command(["pull", "origin", hotfix_branch], root_dir)
elif git_has_remote_branch(hotfix_branch, root_dir):
git_command(["checkout", "-b", hotfix_branch, "origin/"+hotfix_branch], root_dir)
git_command(["pull", "origin", hotfix_branch], root_dir)
else: raise RuntimeError(
"Hotfix branch '"+hotfix_branch+"' does not exist. "
"Re-run with --begin-hotfix to start a new hotfix."
)
git_command(["checkout", "main"], root_dir)
git_command(["pull", "origin", "main"], root_dir)
git_command(["merge", "-X", "theirs", "--no-ff", hotfix_branch], root_dir)
git_command(["tag", "-a", hotfix_version, "-m", "Tagged hotfix "+hotfix_version], root_dir)
git_command(["checkout", "develop"], root_dir)
git_command(["pull", "origin", "develop"], root_dir)
git_command(["merge", "--no-ff", hotfix_branch], root_dir)
git_command(["branch", "-D", hotfix_branch], root_dir)
git_command(["push", "origin", ":"+hotfix_branch], root_dir)
git_command(["push", "origin", hotfix_version], root_dir)
git_command(["push", "origin", "main"], root_dir)
git_command(["push", "origin", "develop"], root_dir)
# creates the command line argument parser
def get_argument_parser():
import argparse
argparser = argparse.ArgumentParser(
prog="workflow",
description="""
Git Workflow utility script
""",
epilog="""
Copyright (c) Matúš Chochlík.
Permission is granted to copy, distribute and/or modify this document
under the terms of the Boost Software License, Version 1.0.
(See a copy at http://www.boost.org/LICENSE_1_0.txt)
"""
)
argparser.add_argument(
"--dry-run",
default=False,
action="store_true",
help="""
Only print the commands that should be executed
but don't do anything.
"""
)
argparser_action_group = argparser.add_mutually_exclusive_group()
argparser_action_group.add_argument(
"--begin-release",
dest="action",
action="store_const",
const=action_begin_release,
help="""
Starts a new release.
"""
)
argparser_action_group.add_argument(
"--finish-release",
dest="action",
action="store_const",
const=action_finish_release,
help="""
Finishes the current release.
"""
)
argparser_action_group.add_argument(
"--begin-hotfix",
dest="action",
action="store_const",
const=action_begin_hotfix,
help="""
Starts a new hotfix.
"""
)
argparser_action_group.add_argument(
"--finish-hotfix",
dest="action",
action="store_const",
const=action_finish_hotfix,
help="""
Finishes the current hotfix.
"""
)
return argparser
def main():
try:
# parse and process the command-line arguments
argparser = get_argument_parser()
options = argparser.parse_args()
global dry_run
dry_run = options.dry_run
if options.action:
options.action()
else: print("No action specified")
except RuntimeError as rte:
print("Runtime error: " + str(rte))
if __name__ == "__main__": main()
|
the-stack_106_30527 | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2017 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile, escape, IGNORECASE
from ..scraper import _BasicScraper
from ..util import tagre
from .common import _ComicControlScraper, _WPNavi
class Katzenfuttergeleespritzer(_WPNavi):
url = 'http://www.katzenfuttergeleespritzer.de/'
multipleImagesPerStrip = True
lang = 'de'
class KevinAndKell(_BasicScraper):
url = 'http://www.kevinandkell.com/'
stripUrl = url + '%s/kk%s%s.html'
firstStripUrl = stripUrl % ('1995', '09', '03')
imageSearch = compile(r'<img.+?src="(/?(\d+/)?strips/kk\d+.(gif|jpg))"',
IGNORECASE)
prevSearch = compile(
r'<a.+?href="(/?(\.\./)?\d+/kk\d+\.html)"[^>]*><span>Previous Strip',
IGNORECASE)
help = 'Index format: yyyy-mm-dd'
def getIndexStripUrl(self, index):
return self.stripUrl % tuple(map(int, index.split('-')))
class KickInTheHead(_WPNavi):
url = 'http://www.kickinthehead.org/'
firstStripUrl = url + '2003/03/20/ipod-envy/'
class KillSixBillionDemons(_WPNavi):
url = 'http://killsixbilliondemons.com/'
firstStripUrl = url + 'comic/kill-six-billion-demons-chapter-1/'
multipleImagesPerStrip = True
adult = True
class KiwiBlitz(_ComicControlScraper):
url = 'http://www.kiwiblitz.com'
class Krakow(_BasicScraper):
url = 'http://www.krakow.krakowstudios.com/'
stripUrl = url + 'archive.php?date=%s'
firstStripUrl = stripUrl % '20081111'
imageSearch = compile(r'<img src="(comics/.+?)"')
prevSearch = compile(
r'<a href="(archive\.php\?date=.+?)"><img border=0 name=previous_day')
help = 'Index format: yyyymmdd'
class KuroShouri(_BasicScraper):
url = 'http://kuroshouri.com/'
rurl = escape(url)
stripUrl = url + '?webcomic_post=%s'
imageSearch = compile(
tagre("img", "src",
r"(%swp-content/webcomic/kuroshouri/[^'\"]+)" % rurl,
quote="['\"]"))
prevSearch = compile(
tagre("a", "href", r'(%s\?webcomic_post\=[^"]+)' % rurl,
after="previous"))
help = 'Index format: chapter-n-page-m'
|
the-stack_106_30528 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# DLink.DIR.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
from noc.core.script.http.base import HTTPError
class Script(BaseScript):
name = "DLink.DIR.get_version"
interface = IGetVersion
cache = True
def execute(self, **kwargs):
baseURL = "/cliget.cgi?cmd="
r = {"vendor": "DLink", "platform": "DIR Undefined", "version": ""}
param = {"platform": "$sys_model", "hw_ver": "$hw_cver", "version": "$sw_ver"}
# /cliget.cgi?cmd=$sys_model%;echo"%;$hw_cver%;echo"%;$sw_ver%;echo"
req = "%;".join(["%;".join((param[p], 'echo"')) for p in param])
urlpath = baseURL + req + ";"
self.logger.debug("URL path is: %s" % urlpath)
try:
rr = self.http.get(urlpath)
except HTTPError:
return {"vendor": "DLink", "version": "", "platform": ""}
rr = rr.splitlines()
self.logger.debug("Result: %s " % rr)
if rr:
r = {
"vendor": "DLink",
"platform": rr[0],
"version": rr[2],
"attributes": {"HW version": rr[1]},
}
return r
|
the-stack_106_30529 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from urllib import parse
import redis
from .base import * # noqa
SECRET_KEY = "jllc(^rzpe8_udv)oadny2j3ym#qd^x^3ns11_8kq(1rf8qpd2"
DATABASES["default"] = {
"ENGINE": "django.db.backends.mysql",
"NAME": "bcs-app",
"USER": "root",
"PASSWORD": os.environ.get("DB_PASSWORD", ""),
"HOST": os.environ.get("DB_HOST", "127.0.0.1"),
"PORT": "3306",
"OPTIONS": {
"init_command": "SET default_storage_engine=INNODB",
},
}
INSTALLED_APPS += [
"backend.celery_app.CeleryConfig",
]
# For local development, skip the IAM (permission center) v3 data initialization logic
INSTALLED_APPS.remove("backend.iam.bcs_iam_migration.apps.BcsIamMigrationConfig")
LOG_LEVEL = "DEBUG"
LOGGING = get_logging_config(LOG_LEVEL)
# Domain of the deployed community edition
BK_PAAS_HOST = os.environ.get("BK_PAAS_HOST", "http://dev.paas.com")
SESSION_COOKIE_DOMAIN = "." + parse.urlparse(BK_PAAS_HOST).netloc.split(":")[0]
CSRF_COOKIE_DOMAIN = SESSION_COOKIE_DOMAIN
# cors settings
CORS_ORIGIN_REGEX_WHITELIST = (r".*",)
PAAS_ENV = "local"
# Container service address
DEVOPS_HOST = os.environ.get("DEV_DEVOPS_HOST", "")
DEVOPS_BCS_HOST = os.environ.get("DEV_BCS_APP_HOST", "")
# Container service API address
DEVOPS_BCS_API_URL = os.environ.get("DEV_BCS_APP_HOST", "")
DEVOPS_ARTIFACTORY_HOST = os.environ.get("BKAPP_ARTIFACTORY_HOST")
BK_PAAS_INNER_HOST = os.environ.get("BK_PAAS_INNER_HOST", BK_PAAS_HOST)
REDIS_URL = os.environ.get("BKAPP_REDIS_URL", "redis://127.0.0.1/0")
# Parse the Redis URL
_rpool = redis.from_url(REDIS_URL).connection_pool
REDIS_HOST = _rpool.connection_kwargs["host"]
REDIS_PORT = _rpool.connection_kwargs["port"]
REDIS_PASSWORD = _rpool.connection_kwargs["password"]
REDIS_DB = _rpool.connection_kwargs["db"]
# IAM host address
BK_IAM_HOST = os.environ.get('BKAPP_IAM_HOST', 'http://dev.iam.com')
APIGW_HOST = BK_PAAS_INNER_HOST
DEPOT_API = f"{APIGW_HOST}/api/apigw/harbor_api/"
# Needed for the web-console configuration; remove on the backend
RDS_HANDER_SETTINGS = {
"level": "INFO",
"class": "backend.utils.log.LogstashRedisHandler",
"redis_url": REDIS_URL,
"queue_name": "paas_backend_log_list",
"message_type": "python-logstash",
"tags": ["sz", "stag", "paas-backend"],
}
CACHES["default"] = {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": REDIS_URL,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
}
# BCS CC PATH
BCS_CC_CLUSTER_CONFIG = "/v1/clusters/{cluster_id}/cluster_version_config/"
BCS_CC_GET_CLUSTER_MASTERS = "/projects/{project_id}/clusters/{cluster_id}/manager_masters/"
BCS_CC_GET_PROJECT_MASTERS = "/projects/{project_id}/clusters/null/manager_masters/"
BCS_CC_GET_PROJECT_NODES = "/projects/{project_id}/clusters/null/nodes/"
BCS_CC_OPER_PROJECT_NODE = "/projects/{project_id}/clusters/null/nodes/{node_id}/"
BCS_CC_OPER_PROJECT_NAMESPACES = "/projects/{project_id}/clusters/null/namespaces/"
BCS_CC_OPER_PROJECT_NAMESPACE = "/projects/{project_id}/clusters/null/namespaces/{namespace_id}/"
HELM_MERELY_REPO_URL = os.environ.get("BKAPP_HARBOR_CHARTS_DOMAIN")
BCS_SERVER_HOST = {"prod": os.environ.get("BKAPP_BCS_API_DOMAIN")}
|